text (string, 4 to 1.02M chars) | meta (dict)
---|---|
def f():
return 4
def g(_):
return 5
def h():
return 6
print(f'no interpolation')
print(f"no interpolation")
print(f"""no interpolation""")
x, y = 1, 2
print(f'{x}')
print(f'{x:08x}')
print(f'a {x} b {y} c')
print(f'a {x:08x} b {y} c')
print(f'a {"hello"} b')
print(f'a {f() + g("foo") + h()} b')
def foo(a, b):
return f'{x}{y}{a}{b}'
print(foo(7, 8))
# PEP-0498 specifies that '\\' and '#' must be disallowed explicitly, whereas
# MicroPython relies on the syntax error as a result of the substitution.
print(f"\\")
print(f'#')
try:
eval("f'{\}'")
except SyntaxError:
print('SyntaxError')
try:
eval("f'{#}'")
except SyntaxError:
print('SyntaxError')
# PEP-0498 specifies that handling of double braces '{{' or '}}' should
# behave like str.format.
print(f'{{}}')
print(f'{{{4*10}}}', '{40}')
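# Illustrative only (not part of the original test): the two prints above output
# "{}" and "{40} {40}" respectively -- doubled braces are emitted literally,
# matching str.format behaviour.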
# A single closing brace, unlike with str.format, should raise a syntax error.
# MicroPython instead raises ValueError at runtime from the substitution.
try:
eval("f'{{}'")
except (ValueError, SyntaxError):
# MicroPython incorrectly raises ValueError here.
print('SyntaxError')
| {
"content_hash": "7fadf7e04fbe7a2955ac53b4fb771087",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 77,
"avg_line_length": 21.92156862745098,
"alnum_prop": 0.6305903398926654,
"repo_name": "bvernoux/micropython",
"id": "4f7225fcad8b6d3e4a1f81dd63b01621e8550c6d",
"size": "1118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/basics/string_fstring.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "50694"
},
{
"name": "C",
"bytes": "19869126"
},
{
"name": "C++",
"bytes": "2489380"
},
{
"name": "HTML",
"bytes": "84456"
},
{
"name": "Makefile",
"bytes": "49218"
},
{
"name": "Objective-C",
"bytes": "8382"
},
{
"name": "Python",
"bytes": "856777"
},
{
"name": "Shell",
"bytes": "6229"
}
],
"symlink_target": ""
} |
"""
PostgreSQL database backend for Django.
Requires psycopg 1: http://initd.org/projects/psycopg1
"""
from django.db.backends import *
from django.db.backends.postgresql.client import DatabaseClient
from django.db.backends.postgresql.creation import DatabaseCreation
from django.db.backends.postgresql.introspection import DatabaseIntrospection
from django.db.backends.postgresql.operations import DatabaseOperations
from django.db.backends.postgresql.version import get_version
from django.utils.encoding import smart_str, smart_unicode
try:
import psycopg as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg module: %s" % e)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
class UnicodeCursorWrapper(object):
"""
A thin wrapper around psycopg cursors that allows them to accept Unicode
strings as params.
This is necessary because psycopg doesn't apply any DB quoting to
parameters that are Unicode strings. If a param is Unicode, this will
convert it to a bytestring using the database client's encoding before passing
it to psycopg.
All results retrieved from the database are converted into Unicode strings
before being returned to the caller.
"""
def __init__(self, cursor, charset):
self.cursor = cursor
self.charset = charset
def format_params(self, params):
if isinstance(params, dict):
result = {}
charset = self.charset
for key, value in params.items():
result[smart_str(key, charset)] = smart_str(value, charset)
return result
else:
return tuple([smart_str(p, self.charset, True) for p in params])
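# Illustrative only (not part of the original backend): with charset 'utf-8',
# format_params({u'name': u'caf\xe9'}) returns {'name': 'caf\xc3\xa9'} (keys and
# values coerced via smart_str), while format_params((u'caf\xe9', 1)) returns
# ('caf\xc3\xa9', 1) because strings_only=True leaves non-strings untouched.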
def execute(self, sql, params=()):
return self.cursor.execute(smart_str(sql, self.charset), self.format_params(params))
def executemany(self, sql, param_list):
new_param_list = [self.format_params(params) for params in param_list]
return self.cursor.executemany(sql, new_param_list)
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
uses_savepoints = True
class DatabaseWrapper(BaseDatabaseWrapper):
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures()
self.ops = DatabaseOperations()
self.client = DatabaseClient()
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation()
def _cursor(self, settings):
set_tz = False
if self.connection is None:
set_tz = True
if settings.DATABASE_NAME == '':
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You need to specify DATABASE_NAME in your Django settings file.")
conn_string = "dbname=%s" % settings.DATABASE_NAME
if settings.DATABASE_USER:
conn_string = "user=%s %s" % (settings.DATABASE_USER, conn_string)
if settings.DATABASE_PASSWORD:
conn_string += " password='%s'" % settings.DATABASE_PASSWORD
if settings.DATABASE_HOST:
conn_string += " host=%s" % settings.DATABASE_HOST
if settings.DATABASE_PORT:
conn_string += " port=%s" % settings.DATABASE_PORT
self.connection = Database.connect(conn_string, **self.options)
self.connection.set_isolation_level(1) # make transactions transparent to all cursors
cursor = self.connection.cursor()
if set_tz:
cursor.execute("SET TIME ZONE %s", [settings.TIME_ZONE])
if not hasattr(self, '_version'):
self.__class__._version = get_version(cursor)
if self._version < (8, 0):
# No savepoint support for earlier versions of PostgreSQL.
self.features.uses_savepoints = False
cursor.execute("SET client_encoding to 'UNICODE'")
cursor = UnicodeCursorWrapper(cursor, 'utf-8')
return cursor
def typecast_string(s):
"""
Cast all returned strings to unicode strings.
"""
if not s and not isinstance(s, str):
return s
return smart_unicode(s)
# Register these custom typecasts, because Django expects dates/times to be
# in Python's native (standard-library) datetime/time format, whereas psycopg
# uses mx.DateTime by default.
try:
Database.register_type(Database.new_type((1082,), "DATE", util.typecast_date))
except AttributeError:
raise Exception("You appear to be using psycopg version 2. Set your DATABASE_ENGINE to 'postgresql_psycopg2' instead of 'postgresql'.")
Database.register_type(Database.new_type((1083,1266), "TIME", util.typecast_time))
Database.register_type(Database.new_type((1114,1184), "TIMESTAMP", util.typecast_timestamp))
Database.register_type(Database.new_type((16,), "BOOLEAN", util.typecast_boolean))
Database.register_type(Database.new_type((1700,), "NUMERIC", util.typecast_decimal))
Database.register_type(Database.new_type(Database.types[1043].values, 'STRING', typecast_string))
| {
"content_hash": "6f72aab3f6e22aa735ec3513365b7411",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 139,
"avg_line_length": 40.27891156462585,
"alnum_prop": 0.651748015537916,
"repo_name": "weigj/django-multidb",
"id": "ad271f2e38e10c434f2ddf90a08875a7e9db6780",
"size": "5921",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "django/db/backends/postgresql/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "77034"
},
{
"name": "Python",
"bytes": "4173202"
},
{
"name": "Shell",
"bytes": "804"
}
],
"symlink_target": ""
} |
DOCUMENTATION = '''
---
module: gce_net
version_added: "1.5"
short_description: create/destroy GCE networks and firewall rules
description:
- This module can create and destroy Google Compute Engine networks and
firewall rules U(https://developers.google.com/compute/docs/networking).
The I(name) parameter is reserved for referencing a network while the
I(fwname) parameter is used to reference firewall rules.
IPv4 Address ranges must be specified using the CIDR
U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
allowed:
description:
- the protocol:ports to allow ('tcp:80' or 'tcp:80,443' or 'tcp:80-800')
required: false
default: null
aliases: []
ipv4_range:
description:
- the IPv4 address range in CIDR notation for the network
required: false
aliases: ['cidr']
fwname:
description:
- name of the firewall rule
required: false
default: null
aliases: ['fwrule']
name:
description:
- name of the network
required: false
default: null
aliases: []
src_range:
description:
- the source IPv4 address range in CIDR notation
required: false
default: null
aliases: ['src_cidr']
src_tags:
description:
- the source instance tags for creating a firewall rule
required: false
default: null
aliases: []
target_tags:
version_added: "1.9"
description:
- the target instance tags for creating a firewall rule
required: false
default: null
aliases: []
state:
description:
- desired state of the network or firewall rule
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <[email protected]>
'''
EXAMPLES = '''
# Simple example of creating a new network
- local_action:
module: gce_net
name: privatenet
ipv4_range: '10.240.16.0/24'
# Simple example of creating a new firewall rule
- local_action:
module: gce_net
name: privatenet
fwname: all-web-webproxy
allowed: tcp:80,8080
src_tags: ["web", "proxy"]
'''
import sys
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError
_ = Provider.GCE
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support required for this module.'")
sys.exit(1)
def format_allowed(allowed):
"""Format the 'allowed' value so that it is GCE compatible."""
if allowed.count(":") == 0:
protocol = allowed
ports = []
elif allowed.count(":") == 1:
protocol, ports = allowed.split(":")
else:
return []
if ports.count(","):
ports = ports.split(",")
else:
ports = [ports]
return_val = {"IPProtocol": protocol}
if ports:
return_val["ports"] = ports
return [return_val]
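# Illustrative only (not part of the original module):
# format_allowed('tcp:80') -> [{'IPProtocol': 'tcp', 'ports': ['80']}]
# format_allowed('tcp:80,443') -> [{'IPProtocol': 'tcp', 'ports': ['80', '443']}]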
def main():
module = AnsibleModule(
argument_spec = dict(
allowed = dict(),
ipv4_range = dict(),
fwname = dict(),
name = dict(),
src_range = dict(type='list'),
src_tags = dict(type='list'),
target_tags = dict(type='list'),
state = dict(default='present'),
service_account_email = dict(),
pem_file = dict(),
project_id = dict(),
)
)
gce = gce_connect(module)
allowed = module.params.get('allowed')
ipv4_range = module.params.get('ipv4_range')
fwname = module.params.get('fwname')
name = module.params.get('name')
src_range = module.params.get('src_range')
src_tags = module.params.get('src_tags')
target_tags = module.params.get('target_tags')
state = module.params.get('state')
changed = False
json_output = {'state': state}
if state in ['active', 'present']:
network = None
try:
network = gce.ex_get_network(name)
json_output['name'] = name
json_output['ipv4_range'] = network.cidr
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# user wants to create a new network that doesn't yet exist
if name and not network:
if not ipv4_range:
module.fail_json(msg="Missing required 'ipv4_range' parameter",
changed=False)
try:
network = gce.ex_create_network(name, ipv4_range)
json_output['name'] = name
json_output['ipv4_range'] = ipv4_range
changed = True
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if fwname:
# user creating a firewall rule
if not allowed and not src_range and not src_tags:
if changed and network:
module.fail_json(
msg="Network created, but missing required " + \
"firewall rule parameter(s)", changed=True)
module.fail_json(
msg="Missing required firewall rule parameter(s)",
changed=False)
allowed_list = format_allowed(allowed)
try:
gce.ex_create_firewall(fwname, allowed_list, network=name,
source_ranges=src_range, source_tags=src_tags, target_tags=target_tags)
changed = True
except ResourceExistsError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['fwname'] = fwname
json_output['allowed'] = allowed
json_output['src_range'] = src_range
json_output['src_tags'] = src_tags
json_output['target_tags'] = target_tags
if state in ['absent', 'deleted']:
if fwname:
json_output['fwname'] = fwname
fw = None
try:
fw = gce.ex_get_firewall(fwname)
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if fw:
gce.ex_destroy_firewall(fw)
changed = True
if name:
json_output['name'] = name
network = None
try:
network = gce.ex_get_network(name)
# json_output['d1'] = 'found network name %s' % name
except ResourceNotFoundError:
# json_output['d2'] = 'not found network name %s' % name
pass
except Exception, e:
# json_output['d3'] = 'error with %s' % name
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if network:
# json_output['d4'] = 'deleting %s' % name
gce.ex_destroy_network(network)
# json_output['d5'] = 'deleted %s' % name
changed = True
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
main()
| {
"content_hash": "7b9a16b5a74892131451d7c47255e9e5",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 95,
"avg_line_length": 30.78030303030303,
"alnum_prop": 0.5806054639428994,
"repo_name": "mith1979/ansible_automation",
"id": "bafe6d1d43a2ef220efdc94514a5b77937363dcf",
"size": "8828",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/core/cloud/google/gce_net.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1005"
},
{
"name": "C",
"bytes": "84868"
},
{
"name": "CSS",
"bytes": "50289"
},
{
"name": "HTML",
"bytes": "70428"
},
{
"name": "JavaScript",
"bytes": "105262"
},
{
"name": "PowerShell",
"bytes": "51840"
},
{
"name": "Python",
"bytes": "19073705"
},
{
"name": "Shell",
"bytes": "3747"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
from os.path import abspath
from re import compile as re_compile
from datadog_checks.base import ConfigurationError, is_affirmative
MAX_FILEGAUGE_COUNT = 20
class DirectoryConfig(object):
def __init__(self, instance):
if 'directory' not in instance:
raise ConfigurationError('DirectoryCheck: missing `directory` in config')
directory = instance['directory']
self.abs_directory = abspath(directory)
self.name = instance.get('name', directory)
self.pattern = instance.get('pattern')
exclude_dirs = instance.get('exclude_dirs', [])
self.exclude_dirs_pattern = re_compile('|'.join(exclude_dirs)) if exclude_dirs else None
self.dirs_patterns_full = is_affirmative(instance.get('dirs_patterns_full', False))
self.recursive = is_affirmative(instance.get('recursive', False))
self.dirtagname = instance.get('dirtagname', 'name')
self.filetagname = instance.get('filetagname', 'filename')
self.filegauges = is_affirmative(instance.get('filegauges', False))
self.countonly = is_affirmative(instance.get('countonly', False))
self.ignore_missing = is_affirmative(instance.get('ignore_missing', False))
self.follow_symlinks = is_affirmative(instance.get('follow_symlinks', True))
self.stat_follow_symlinks = is_affirmative(instance.get('stat_follow_symlinks', True))
self.submit_histograms = is_affirmative(instance.get('submit_histograms', True))
self.tags = instance.get('tags', [])
self.max_filegauge_count = instance.get('max_filegauge_count', MAX_FILEGAUGE_COUNT)
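# Illustrative only (not part of the check): a minimal instance this config class
# accepts could look like
#     {'directory': '/var/log', 'pattern': '*.log', 'recursive': True}
# every other option above falls back to its default when omitted.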
| {
"content_hash": "3ca747641ecdc73e5541fae01ba46462",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 96,
"avg_line_length": 52.54838709677419,
"alnum_prop": 0.6893799877225292,
"repo_name": "DataDog/integrations-core",
"id": "0c2bf74ed0b0e2cb4df66eb7a165472a887476eb",
"size": "1744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "directory/datadog_checks/directory/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("soamgr:order")
class UserUpdateView(LoginRequiredMixin, UpdateView):
fields = ['name',]
# we already imported User in the view code above, remember?
model = User
def get_context_data(self, **kwargs):
context = super(UserUpdateView, self).get_context_data(**kwargs)
if self.request.user.is_paraplanner:
self.fields += ['account_name','bsb','account_number',]
return context
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
| {
"content_hash": "0ab6ae357a053d81e3d286496da82ef0",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 79,
"avg_line_length": 30.62264150943396,
"alnum_prop": 0.6987060998151571,
"repo_name": "nikkomidoy/project_soa",
"id": "80f7bc544386799bd74ad7efedac7d9cce907efb",
"size": "1647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_soa/users/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1775"
},
{
"name": "HTML",
"bytes": "57470"
},
{
"name": "JavaScript",
"bytes": "68379"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "63453"
},
{
"name": "Shell",
"bytes": "6976"
}
],
"symlink_target": ""
} |
import json
from typing import (
TYPE_CHECKING,
Awaitable,
Callable,
Collection,
Dict,
Iterable,
List,
Mapping,
Optional,
Set,
TypeVar,
Union,
)
import jsonschema
from jsonschema import FormatChecker
from synapse.api.constants import EduTypes, EventContentFields
from synapse.api.errors import SynapseError
from synapse.api.presence import UserPresenceState
from synapse.events import EventBase, relation_from_event
from synapse.types import JsonDict, RoomID, UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
FILTER_SCHEMA = {
"additionalProperties": True, # Allow new fields for forward compatibility
"type": "object",
"properties": {
"limit": {"type": "number"},
"senders": {"$ref": "#/definitions/user_id_array"},
"not_senders": {"$ref": "#/definitions/user_id_array"},
# TODO: We don't limit event type values but we probably should...
# check types are valid event types
"types": {"type": "array", "items": {"type": "string"}},
"not_types": {"type": "array", "items": {"type": "string"}},
# MSC3874, filtering /messages.
"org.matrix.msc3874.rel_types": {"type": "array", "items": {"type": "string"}},
"org.matrix.msc3874.not_rel_types": {
"type": "array",
"items": {"type": "string"},
},
},
}
ROOM_FILTER_SCHEMA = {
"additionalProperties": True, # Allow new fields for forward compatibility
"type": "object",
"properties": {
"not_rooms": {"$ref": "#/definitions/room_id_array"},
"rooms": {"$ref": "#/definitions/room_id_array"},
"ephemeral": {"$ref": "#/definitions/room_event_filter"},
"include_leave": {"type": "boolean"},
"state": {"$ref": "#/definitions/room_event_filter"},
"timeline": {"$ref": "#/definitions/room_event_filter"},
"account_data": {"$ref": "#/definitions/room_event_filter"},
},
}
ROOM_EVENT_FILTER_SCHEMA = {
"additionalProperties": True, # Allow new fields for forward compatibility
"type": "object",
"properties": {
"limit": {"type": "number"},
"senders": {"$ref": "#/definitions/user_id_array"},
"not_senders": {"$ref": "#/definitions/user_id_array"},
"types": {"type": "array", "items": {"type": "string"}},
"not_types": {"type": "array", "items": {"type": "string"}},
"rooms": {"$ref": "#/definitions/room_id_array"},
"not_rooms": {"$ref": "#/definitions/room_id_array"},
"contains_url": {"type": "boolean"},
"lazy_load_members": {"type": "boolean"},
"include_redundant_members": {"type": "boolean"},
"unread_thread_notifications": {"type": "boolean"},
"org.matrix.msc3773.unread_thread_notifications": {"type": "boolean"},
# Include or exclude events with the provided labels.
# cf https://github.com/matrix-org/matrix-doc/pull/2326
"org.matrix.labels": {"type": "array", "items": {"type": "string"}},
"org.matrix.not_labels": {"type": "array", "items": {"type": "string"}},
# MSC3440, filtering by event relations.
"related_by_senders": {"type": "array", "items": {"type": "string"}},
"related_by_rel_types": {"type": "array", "items": {"type": "string"}},
},
}
USER_ID_ARRAY_SCHEMA = {
"type": "array",
"items": {"type": "string", "format": "matrix_user_id"},
}
ROOM_ID_ARRAY_SCHEMA = {
"type": "array",
"items": {"type": "string", "format": "matrix_room_id"},
}
USER_FILTER_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "schema for a Sync filter",
"type": "object",
"definitions": {
"room_id_array": ROOM_ID_ARRAY_SCHEMA,
"user_id_array": USER_ID_ARRAY_SCHEMA,
"filter": FILTER_SCHEMA,
"room_filter": ROOM_FILTER_SCHEMA,
"room_event_filter": ROOM_EVENT_FILTER_SCHEMA,
},
"properties": {
"presence": {"$ref": "#/definitions/filter"},
"account_data": {"$ref": "#/definitions/filter"},
"room": {"$ref": "#/definitions/room_filter"},
"event_format": {"type": "string", "enum": ["client", "federation"]},
"event_fields": {
"type": "array",
"items": {
"type": "string",
# Don't allow '\\' in event field filters. This makes matching
# events a lot easier as we can then use a negative lookbehind
# assertion to split '\.' If we allowed \\ then it would
# incorrectly split '\\.' See synapse.events.utils.serialize_event
#
# Note that because this is a regular expression, we have to escape
# each backslash in the pattern.
"pattern": r"^((?!\\\\).)*$",
},
},
},
"additionalProperties": True, # Allow new fields for forward compatibility
}
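# Illustrative only: a user filter dict in the shape USER_FILTER_SCHEMA accepts, e.g.
# {
#     "room": {
#         "timeline": {"limit": 10, "types": ["m.room.message"]},
#         "state": {"lazy_load_members": True},
#     },
#     "event_fields": ["type", "content", "sender"],
# }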
@FormatChecker.cls_checks("matrix_room_id")
def matrix_room_id_validator(room_id: object) -> bool:
return isinstance(room_id, str) and RoomID.is_valid(room_id)
@FormatChecker.cls_checks("matrix_user_id")
def matrix_user_id_validator(user_id: object) -> bool:
return isinstance(user_id, str) and UserID.is_valid(user_id)
class Filtering:
def __init__(self, hs: "HomeServer"):
self._hs = hs
self.store = hs.get_datastores().main
self.DEFAULT_FILTER_COLLECTION = FilterCollection(hs, {})
async def get_user_filter(
self, user_localpart: str, filter_id: Union[int, str]
) -> "FilterCollection":
result = await self.store.get_user_filter(user_localpart, filter_id)
return FilterCollection(self._hs, result)
def add_user_filter(
self, user_localpart: str, user_filter: JsonDict
) -> Awaitable[int]:
self.check_valid_filter(user_filter)
return self.store.add_user_filter(user_localpart, user_filter)
# TODO(paul): surely we should probably add a delete_user_filter or
# replace_user_filter at some point? There's no REST API specified for
# them however
def check_valid_filter(self, user_filter_json: JsonDict) -> None:
"""Check if the provided filter is valid.
This inspects all definitions contained within the filter.
Args:
user_filter_json: The filter
Raises:
SynapseError: If the filter is not valid.
"""
# NB: Filters are the complete json blobs. "Definitions" are an
# individual top-level key e.g. public_user_data. Filters are made of
# many definitions.
try:
jsonschema.validate(
user_filter_json, USER_FILTER_SCHEMA, format_checker=FormatChecker()
)
except jsonschema.ValidationError as e:
raise SynapseError(400, str(e))
# Filters work across events, presence EDUs, and account data.
FilterEvent = TypeVar("FilterEvent", EventBase, UserPresenceState, JsonDict)
class FilterCollection:
def __init__(self, hs: "HomeServer", filter_json: JsonDict):
self._filter_json = filter_json
room_filter_json = self._filter_json.get("room", {})
self._room_filter = Filter(
hs,
{k: v for k, v in room_filter_json.items() if k in ("rooms", "not_rooms")},
)
self._room_timeline_filter = Filter(hs, room_filter_json.get("timeline", {}))
self._room_state_filter = Filter(hs, room_filter_json.get("state", {}))
self._room_ephemeral_filter = Filter(hs, room_filter_json.get("ephemeral", {}))
self._room_account_data = Filter(hs, room_filter_json.get("account_data", {}))
self._presence_filter = Filter(hs, filter_json.get("presence", {}))
self._account_data = Filter(hs, filter_json.get("account_data", {}))
self.include_leave = filter_json.get("room", {}).get("include_leave", False)
self.event_fields = filter_json.get("event_fields", [])
self.event_format = filter_json.get("event_format", "client")
def __repr__(self) -> str:
return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
def get_filter_json(self) -> JsonDict:
return self._filter_json
def timeline_limit(self) -> int:
return self._room_timeline_filter.limit
def presence_limit(self) -> int:
return self._presence_filter.limit
def ephemeral_limit(self) -> int:
return self._room_ephemeral_filter.limit
def lazy_load_members(self) -> bool:
return self._room_state_filter.lazy_load_members
def include_redundant_members(self) -> bool:
return self._room_state_filter.include_redundant_members
def unread_thread_notifications(self) -> bool:
return self._room_timeline_filter.unread_thread_notifications
async def filter_presence(
self, events: Iterable[UserPresenceState]
) -> List[UserPresenceState]:
return await self._presence_filter.filter(events)
async def filter_account_data(self, events: Iterable[JsonDict]) -> List[JsonDict]:
return await self._account_data.filter(events)
async def filter_room_state(self, events: Iterable[EventBase]) -> List[EventBase]:
return await self._room_state_filter.filter(
await self._room_filter.filter(events)
)
async def filter_room_timeline(
self, events: Iterable[EventBase]
) -> List[EventBase]:
return await self._room_timeline_filter.filter(
await self._room_filter.filter(events)
)
async def filter_room_ephemeral(self, events: Iterable[JsonDict]) -> List[JsonDict]:
return await self._room_ephemeral_filter.filter(
await self._room_filter.filter(events)
)
async def filter_room_account_data(
self, events: Iterable[JsonDict]
) -> List[JsonDict]:
return await self._room_account_data.filter(
await self._room_filter.filter(events)
)
def blocks_all_presence(self) -> bool:
return (
self._presence_filter.filters_all_types()
or self._presence_filter.filters_all_senders()
)
def blocks_all_room_ephemeral(self) -> bool:
return (
self._room_ephemeral_filter.filters_all_types()
or self._room_ephemeral_filter.filters_all_senders()
or self._room_ephemeral_filter.filters_all_rooms()
)
def blocks_all_room_timeline(self) -> bool:
return (
self._room_timeline_filter.filters_all_types()
or self._room_timeline_filter.filters_all_senders()
or self._room_timeline_filter.filters_all_rooms()
)
class Filter:
def __init__(self, hs: "HomeServer", filter_json: JsonDict):
self._hs = hs
self._store = hs.get_datastores().main
self.filter_json = filter_json
self.limit = filter_json.get("limit", 10)
self.lazy_load_members = filter_json.get("lazy_load_members", False)
self.include_redundant_members = filter_json.get(
"include_redundant_members", False
)
self.unread_thread_notifications: bool = filter_json.get(
"unread_thread_notifications", False
)
if (
not self.unread_thread_notifications
and hs.config.experimental.msc3773_enabled
):
self.unread_thread_notifications = filter_json.get(
"org.matrix.msc3773.unread_thread_notifications", False
)
self.types = filter_json.get("types", None)
self.not_types = filter_json.get("not_types", [])
self.rooms = filter_json.get("rooms", None)
self.not_rooms = filter_json.get("not_rooms", [])
self.senders = filter_json.get("senders", None)
self.not_senders = filter_json.get("not_senders", [])
self.contains_url = filter_json.get("contains_url", None)
self.labels = filter_json.get("org.matrix.labels", None)
self.not_labels = filter_json.get("org.matrix.not_labels", [])
self.related_by_senders = filter_json.get("related_by_senders", None)
self.related_by_rel_types = filter_json.get("related_by_rel_types", None)
# For compatibility with _check_fields.
self.rel_types = None
self.not_rel_types = []
if hs.config.experimental.msc3874_enabled:
self.rel_types = filter_json.get("org.matrix.msc3874.rel_types", None)
self.not_rel_types = filter_json.get("org.matrix.msc3874.not_rel_types", [])
def filters_all_types(self) -> bool:
return "*" in self.not_types
def filters_all_senders(self) -> bool:
return "*" in self.not_senders
def filters_all_rooms(self) -> bool:
return "*" in self.not_rooms
def _check(self, event: FilterEvent) -> bool:
"""Checks whether the filter matches the given event.
Args:
event: The event, account data, or presence to check against this
filter.
Returns:
True if the event matches the filter.
"""
# We usually get the full "events" as dictionaries coming through,
# except for presence which actually gets passed around as its own type.
if isinstance(event, UserPresenceState):
user_id = event.user_id
field_matchers = {
"senders": lambda v: user_id == v,
"types": lambda v: EduTypes.PRESENCE == v,
}
return self._check_fields(field_matchers)
else:
content = event.get("content")
# Content is assumed to be a mapping below, so ensure it is. This should
# always be true for events, but account_data has been allowed to
# have non-dict content.
if not isinstance(content, Mapping):
content = {}
sender = event.get("sender", None)
if not sender:
# Presence events had their 'sender' in content.user_id, but are
# now handled above. We don't know if anything else uses this
# form. TODO: Check this and probably remove it.
sender = content.get("user_id")
room_id = event.get("room_id", None)
ev_type = event.get("type", None)
# check if there is a string url field in the content for filtering purposes
labels = content.get(EventContentFields.LABELS, [])
# Check if the event has a relation.
rel_type = None
if isinstance(event, EventBase):
relation = relation_from_event(event)
if relation:
rel_type = relation.rel_type
field_matchers = {
"rooms": lambda v: room_id == v,
"senders": lambda v: sender == v,
"types": lambda v: _matches_wildcard(ev_type, v),
"labels": lambda v: v in labels,
"rel_types": lambda v: rel_type == v,
}
result = self._check_fields(field_matchers)
if not result:
return result
contains_url_filter = self.contains_url
if contains_url_filter is not None:
contains_url = isinstance(content.get("url"), str)
if contains_url_filter != contains_url:
return False
return True
def _check_fields(self, field_matchers: Dict[str, Callable[[str], bool]]) -> bool:
"""Checks whether the filter matches the given event fields.
Args:
field_matchers: A map of attribute name to callable to use for checking
particular fields.
The attribute name and an inverse (not_<attribute name>) must
exist on the Filter.
The callable should return true if the event's value matches the
filter's value.
Returns:
True if the event fields match
"""
for name, match_func in field_matchers.items():
# If the event matches one of the disallowed values, reject it.
not_name = "not_%s" % (name,)
disallowed_values = getattr(self, not_name)
if any(map(match_func, disallowed_values)):
return False
# Otherwise, if the event does not match at least one of the allowed values,
# reject it.
allowed_values = getattr(self, name)
if allowed_values is not None:
if not any(map(match_func, allowed_values)):
return False
# Otherwise, accept it.
return True
def filter_rooms(self, room_ids: Iterable[str]) -> Set[str]:
"""Apply the 'rooms' filter to a given list of rooms.
Args:
room_ids: A list of room_ids.
Returns:
A list of room_ids that match the filter
"""
room_ids = set(room_ids)
disallowed_rooms = set(self.not_rooms)
room_ids -= disallowed_rooms
allowed_rooms = self.rooms
if allowed_rooms is not None:
room_ids &= set(allowed_rooms)
return room_ids
async def _check_event_relations(
self, events: Collection[FilterEvent]
) -> List[FilterEvent]:
# The event IDs to check; mypy doesn't understand the isinstance check.
event_ids = [event.event_id for event in events if isinstance(event, EventBase)] # type: ignore[attr-defined]
event_ids_to_keep = set(
await self._store.events_have_relations(
event_ids, self.related_by_senders, self.related_by_rel_types
)
)
return [
event
for event in events
if not isinstance(event, EventBase) or event.event_id in event_ids_to_keep
]
async def filter(self, events: Iterable[FilterEvent]) -> List[FilterEvent]:
result = [event for event in events if self._check(event)]
if self.related_by_senders or self.related_by_rel_types:
return await self._check_event_relations(result)
return result
def with_room_ids(self, room_ids: Iterable[str]) -> "Filter":
"""Returns a new filter with the given room IDs appended.
Args:
room_ids: The room_ids to add
Returns:
filter: A new filter including the given rooms and the old
filter's rooms.
"""
newFilter = Filter(self._hs, self.filter_json)
newFilter.rooms += room_ids
return newFilter
def _matches_wildcard(actual_value: Optional[str], filter_value: str) -> bool:
if filter_value.endswith("*") and isinstance(actual_value, str):
type_prefix = filter_value[:-1]
return actual_value.startswith(type_prefix)
else:
return actual_value == filter_value
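# Illustrative only: _matches_wildcard("m.room.message", "m.room.*") is True,
# while _matches_wildcard("m.room.message", "m.presence") is False.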
| {
"content_hash": "8ea1c682b5a40ca06ac40810c4e6c051",
"timestamp": "",
"source": "github",
"line_count": 512,
"max_line_length": 118,
"avg_line_length": 37.099609375,
"alnum_prop": 0.5910502763885233,
"repo_name": "matrix-org/synapse",
"id": "a9888381b4512988eb3dc0f3cc19e3444fe7e839",
"size": "19709",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "synapse/api/filtering.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7229"
},
{
"name": "Dockerfile",
"bytes": "9316"
},
{
"name": "Gherkin",
"bytes": "441"
},
{
"name": "HTML",
"bytes": "66000"
},
{
"name": "JavaScript",
"bytes": "15635"
},
{
"name": "Jinja",
"bytes": "7687"
},
{
"name": "Lua",
"bytes": "241"
},
{
"name": "Perl",
"bytes": "28191"
},
{
"name": "Python",
"bytes": "10632037"
},
{
"name": "Rust",
"bytes": "57034"
},
{
"name": "Shell",
"bytes": "53124"
}
],
"symlink_target": ""
} |
import os
import re
import sys
from setuptools import setup
execfile(os.path.join("openobject", "release.py"))
version_dash_incompatible = False
if 'bdist_rpm' in sys.argv:
version_dash_incompatible = True
try:
import py2exe
from py2exe_utils import opts
version_dash_incompatible = True
except ImportError:
opts = {}
if version_dash_incompatible:
version = version.split('-')[0]
FILE_PATTERNS = \
r'.+\.(py|cfg|po|pot|mo|txt|rst|gif|png|jpg|ico|mako|html|js|css|htc|swf)$'
def find_data_files(source, patterns=FILE_PATTERNS):
file_matcher = re.compile(patterns, re.I)
out = []
for base, _, files in os.walk(source):
cur_files = []
for f in files:
if file_matcher.match(f):
cur_files.append(os.path.join(base, f))
if cur_files:
out.append(
(base, cur_files))
return out
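# Illustrative only (hypothetical paths): find_data_files('doc') might return
# [('doc', ['doc/index.rst', 'doc/intro.txt']),
#  ('doc/images', ['doc/images/logo.png'])],
# i.e. one (directory, [matching files]) tuple per directory with matches.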
setup(
name=name,
version=version,
description=description,
long_description=long_description,
author=author,
author_email=author_email,
url=url,
download_url=download_url,
license=license,
install_requires=[
"CherryPy == 3.1.2",
"Mako >= 0.2.4",
"Babel >= 0.9.5",
"FormEncode >= 1.2.2",
"simplejson >= 2.0.9",
"python-dateutil >= 1.4.1",
"pytz >= 2009j"
],
zip_safe=False,
packages=[
'openobject',
'openobject.admin',
'openobject.admin.i18n',
'openobject.controllers',
'openobject.i18n',
'openobject.test',
'openobject.tools',
'openobject.widgets'
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Environment :: Web Environment',
'Topic :: Office/Business :: Financial',
],
scripts=['scripts/openerp-web'],
data_files=(find_data_files('addons/openerp')
+ find_data_files('addons/view_calendar')
+ find_data_files('addons/view_diagram')
+ find_data_files('addons/view_graph')
+ find_data_files('addons/widget_ckeditor')
+ find_data_files('doc', patterns='')
+ find_data_files('openobject', patterns=r'.+\.(cfg|css|js|mako|gif|png|jpg|ico)')
+ opts.pop('data_files', [])
),
**opts
)
| {
"content_hash": "55fb5a56156010178d18a1c350406db0",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 96,
"avg_line_length": 28.376470588235293,
"alnum_prop": 0.5762852404643449,
"repo_name": "inafev/openerpappliance",
"id": "6c1831645667273cbf5a9a6afadf8fde595d5de1",
"size": "2412",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "openerp6.0.3-appliance-desktop/overlay/home/openerp/production/openerp-web/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "58383"
},
{
"name": "Python",
"bytes": "24112"
},
{
"name": "Shell",
"bytes": "1187901"
}
],
"symlink_target": ""
} |
import Gaffer
import GafferTest
import GafferUI
import GafferUITest
class PlugLayoutTest( GafferUITest.TestCase ) :
def testRenamingPlugs( self ) :
n = Gaffer.Node()
n["a"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
ui = GafferUI.PlugLayout( n )
w = ui.plugValueWidget( n["a"], lazy=False )
self.assertTrue( w is not None )
self.assertTrue( w.getPlug().isSame( n["a"] ) )
n["a"].setName( "b" )
w2 = ui.plugValueWidget( n["b"], lazy=False )
self.assertTrue( w2 is not None )
self.assertTrue( w2 is w )
self.assertTrue( w2.getPlug().isSame( n["b"] ) )
def testLayoutOrder( self ) :
n = Gaffer.Node()
n["user"]["a"] = Gaffer.IntPlug()
n["user"]["b"] = Gaffer.IntPlug()
n["user"]["c"] = Gaffer.IntPlug()
self.assertEqual(
GafferUI.PlugLayout.layoutOrder( n["user"] ),
[ n["user"]["a"], n["user"]["b"], n["user"]["c"] ],
)
Gaffer.Metadata.registerPlugValue( n["user"]["a"], "layout:index", 3 )
Gaffer.Metadata.registerPlugValue( n["user"]["b"], "layout:index", 2 )
Gaffer.Metadata.registerPlugValue( n["user"]["c"], "layout:index", 1 )
self.assertEqual(
GafferUI.PlugLayout.layoutOrder( n["user"] ),
[ n["user"]["c"], n["user"]["b"], n["user"]["a"] ],
)
class CustomWidget( GafferUI.Widget ) :
def __init__( self, node ) :
GafferUI.Widget.__init__( self, GafferUI.Label( "Custom Widget" ) )
self.node = node
def testCustomWidgets( self ) :
n = Gaffer.Node()
Gaffer.Metadata.registerNodeValue( n, "layout:customWidget:test:widgetType", "GafferUITest.PlugLayoutTest.CustomWidget" )
p = GafferUI.PlugLayout( n )
self.assertTrue( isinstance( p.customWidget( "test", lazy = False ), self.CustomWidget ) )
self.assertTrue( p.customWidget( "test" ).node.isSame( n ) )
def testLazyBuilding( self ) :
n = Gaffer.Node()
n["a"] = Gaffer.IntPlug()
with GafferUI.Window() as window :
plugLayout = GafferUI.PlugLayout( n )
self.assertTrue( plugLayout.plugValueWidget( n["a"], lazy = True ) is None )
window.setVisible( True )
self.assertTrue( plugLayout.plugValueWidget( n["a"], lazy = True ) is not None )
def testSectionQueries( self ) :
n = Gaffer.Node()
n["user"]["a"] = Gaffer.IntPlug()
n["user"]["b"] = Gaffer.IntPlug()
n["user"]["c"] = Gaffer.IntPlug()
self.assertEqual( GafferUI.PlugLayout.layoutSections( n["user"] ), [ "" ] )
Gaffer.Metadata.registerPlugValue( n["user"]["a"], "layout:section", "A" )
Gaffer.Metadata.registerPlugValue( n["user"]["b"], "layout:section", "B" )
Gaffer.Metadata.registerPlugValue( n["user"]["c"], "layout:section", "C" )
self.assertEqual( GafferUI.PlugLayout.layoutSections( n["user"] ), [ "A", "B", "C" ] )
Gaffer.Metadata.registerPlugValue( n["user"]["a"], "layout:index", 3 )
self.assertEqual( GafferUI.PlugLayout.layoutSections( n["user"] ), [ "B", "C", "A" ] )
def testLayoutOrderSectionArgument( self ) :
n = Gaffer.Node()
n["user"]["a"] = Gaffer.IntPlug()
n["user"]["b"] = Gaffer.IntPlug()
n["user"]["c"] = Gaffer.IntPlug()
self.assertEqual(
GafferUI.PlugLayout.layoutOrder( n["user"], section = "" ),
[ n["user"]["a"], n["user"]["b"], n["user"]["c"] ],
)
Gaffer.Metadata.registerPlugValue( n["user"]["a"], "layout:section", "AB" )
Gaffer.Metadata.registerPlugValue( n["user"]["b"], "layout:section", "AB" )
Gaffer.Metadata.registerPlugValue( n["user"]["c"], "layout:section", "C" )
self.assertEqual(
GafferUI.PlugLayout.layoutOrder( n["user"], section = "AB" ),
[ n["user"]["a"], n["user"]["b"] ],
)
self.assertEqual(
GafferUI.PlugLayout.layoutOrder( n["user"], section = "C" ),
[ n["user"]["c"] ],
)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "2e28ac578afde7d53ca724b6927c820c",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 123,
"avg_line_length": 29.951612903225808,
"alnum_prop": 0.6303177167474421,
"repo_name": "goddardl/gaffer",
"id": "03296036ac8c1bf0be94ed95262c78ab924f3775",
"size": "5517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferUITest/PlugLayoutTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2228"
},
{
"name": "C++",
"bytes": "4178625"
},
{
"name": "GLSL",
"bytes": "6250"
},
{
"name": "Python",
"bytes": "4152621"
},
{
"name": "Shell",
"bytes": "8787"
},
{
"name": "Slash",
"bytes": "36371"
}
],
"symlink_target": ""
} |
import errno
import json
import os
import subprocess
import sys
from collections import defaultdict
MISC_XML = """<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component
name="ProjectRootManager"
version="2"
languageLevel="%(java_language_level)s"
assert-keyword="true"
jdk-15="true"
project-jdk-name="%(project_jdk_name)s"
project-jdk-type="%(project_jdk_type)s">
%(project_output_url)s
</component>
</project>"""
MODULE_XML_START = """<?xml version="1.0" encoding="UTF-8"?>
<module type="%(type)s" version="4">"""
MODULE_XML_END = """
</module>
"""
ANDROID_FACET = """
<component name="FacetManager">
<facet type="android" name="Android">
<configuration>
<option name="ENABLE_SOURCES_AUTOGENERATION" value="%(enable_sources_autogeneration)s" />
<option name="GEN_FOLDER_RELATIVE_PATH_APT" value="%(module_gen_path)s" />
<option name="GEN_FOLDER_RELATIVE_PATH_AIDL" value="%(module_gen_path)s" />
<option name="MANIFEST_FILE_RELATIVE_PATH" value="%(android_manifest)s" />
<option name="RES_FOLDER_RELATIVE_PATH" value="%(res)s" />
<option name="ASSETS_FOLDER_RELATIVE_PATH" value="%(asset_folder)s" />
<option name="LIBS_FOLDER_RELATIVE_PATH" value="%(libs_path)s" />
<option name="USE_CUSTOM_APK_RESOURCE_FOLDER" value="false" />
<option name="CUSTOM_APK_RESOURCE_FOLDER" value="" />
<option name="USE_CUSTOM_COMPILER_MANIFEST" value="false" />
<option name="CUSTOM_COMPILER_MANIFEST" value="" />
<option name="APK_PATH" value="%(apk_path)s" />
<option name="LIBRARY_PROJECT" value="%(is_android_library_project)s" />
<option name="RUN_PROCESS_RESOURCES_MAVEN_TASK" value="true" />
<option name="GENERATE_UNSIGNED_APK" value="false" />
<option name="CUSTOM_DEBUG_KEYSTORE_PATH" value="%(keystore)s" />
<option name="PACK_TEST_CODE" value="false" />
<option name="RUN_PROGUARD" value="%(run_proguard)s" />
<option name="PROGUARD_CFG_PATH" value="%(proguard_config)s" />
<resOverlayFolders />
<includeSystemProguardFile>false</includeSystemProguardFile>
<includeAssetsFromLibraries>true</includeAssetsFromLibraries>
<additionalNativeLibs />
</configuration>
</facet>
</component>"""
ALL_MODULES_XML_START = """<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>"""
ALL_MODULES_XML_END = """
</modules>
</component>
</project>
"""
AAR_XML_START = """<component name="libraryTable">
<library name="%(name)s">
<CLASSES>
<root url="jar://$PROJECT_DIR$/%(binary_jar)s!/" />"""
AAR_XML_RESOURCE = """
<root url="file://$PROJECT_DIR$/%(resource_path)s/" />"""
AAR_XML_END = """
</CLASSES>
</library>
</component>
"""
LIBRARY_XML_START = """<component name="libraryTable">
<library name="%(name)s">
<CLASSES>
<root url="jar://$PROJECT_DIR$/%(binary_jar)s!/" />
</CLASSES>"""
LIBRARY_XML_WITH_JAVADOC = """
<JAVADOC>
<root url="%(javadoc_url)s" />
</JAVADOC>"""
LIBRARY_XML_NO_JAVADOC = """
<JAVADOC />"""
LIBRARY_XML_WITH_SOURCES = """
<SOURCES>
<root url="jar://$PROJECT_DIR$/%(source_jar)s!/" />
</SOURCES>"""
LIBRARY_XML_NO_SOURCES = """
<SOURCES />"""
LIBRARY_XML_END = """
</library>
</component>
"""
RUN_CONFIG_XML_START = """<component name="ProjectRunConfigurationManager">"""
RUN_CONFIG_XML_END = "</component>"
REMOTE_RUN_CONFIG_XML = """
<configuration default="false" name="%(name)s" type="Remote" factoryName="Remote">
<option name="USE_SOCKET_TRANSPORT" value="true" />
<option name="SERVER_MODE" value="false" />
<option name="SHMEM_ADDRESS" value="javadebug" />
<option name="HOST" value="localhost" />
<option name="PORT" value="5005" />
<RunnerSettings RunnerId="Debug">
<option name="DEBUG_PORT" value="5005" />
<option name="TRANSPORT" value="0" />
<option name="LOCAL" value="false" />
</RunnerSettings>
<ConfigurationWrapper RunnerId="Debug" />
<method />
</configuration>
"""
# Files that were written by this script.
# If `buck project` is working properly, most of the time it will be a no-op
# and no files will need to be written.
MODIFIED_FILES = []
# Files that are part of the project being run. We will delete all .iml files
# that are not checked in and not in this set.
PROJECT_FILES = set()
# Marker for a directory in the module tree that contains an .iml file.
# Intentionally chosen to be an illegal file name in both unix and windows.
CONTAINS_IML_MARKER = '/*contains_iml*/'
def tree():
""" Create an autovivification tree """
return defaultdict(tree)
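# Illustrative only: t = tree(); t['a']['b']['c'] = 1 works without KeyErrors
# because every missing key materialises another nested defaultdict(tree).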
def create_additional_excludes(modules):
"""Create set of directories to also be excluded."""
# Tree representation of all modules.
module_tree = tree()
additional_excludes = defaultdict(list)
for module in modules:
normalized_iml = os.path.dirname(os.path.normpath(
module['pathToImlFile']))
# Add this path to our build tree
current_directory = module_tree
if normalized_iml:
for part in normalized_iml.split(os.path.sep):
current_directory = current_directory[part]
current_directory[CONTAINS_IML_MARKER] = module
for root, dirs, _files in os.walk('.', topdown=True, followlinks=True):
current_directory = module_tree
normalized_root = os.path.normpath(root)
if normalized_root == '.':
continue
highest_iml_file = None
for part in normalized_root.split(os.path.sep):
if CONTAINS_IML_MARKER in current_directory:
module = current_directory[CONTAINS_IML_MARKER]
found_relevant_source_folder = False
for source_folder in module['sourceFolders']:
# If we find a module that specifies the directory as the
# source folder, then keep all folders under that module.
#
# TODO(royw): Be smarter here and actually keep track of
# the additional directories being tracked by sub modules.
if source_folder['url'] != 'file://$MODULE_DIR$/gen':
found_relevant_source_folder = True
break
# If we found a module containing subdirectories as
# sourceFolders, bail on trying to find a higher IML file.
if found_relevant_source_folder:
break
highest_iml_file = module['pathToImlFile']
if part not in current_directory:
if part != 'res' and highest_iml_file:
additional_excludes[highest_iml_file].append(
normalized_root)
dirs[:] = []
break
else:
current_directory = current_directory[part]
return additional_excludes
def get_path_from_map(map, key, fallback=None):
if key in map:
return '/' + map[key]
if None != fallback:
return '/' + fallback
return ''
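# Illustrative only: get_path_from_map({'resFolder': 'res'}, 'resFolder') returns
# '/res', get_path_from_map({}, 'resFolder', 'res') returns '/res' via the
# fallback, and get_path_from_map({}, 'resFolder') returns ''.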
def write_modules(modules, generate_minimum_project, android_auto_generation_disabled):
"""Writes one XML file for each module."""
additional_excludes = defaultdict(list)
if generate_minimum_project:
additional_excludes = create_additional_excludes(modules)
for module in modules:
# Build up the XML.
module_type = 'JAVA_MODULE'
if 'isIntelliJPlugin' in module and module['isIntelliJPlugin']:
module_type = 'PLUGIN_MODULE'
xml = MODULE_XML_START % {
'type': module_type,
}
# Android facet, if appropriate.
if module.get('hasAndroidFacet') is True:
if 'keystorePath' in module:
keystore = 'file://$MODULE_DIR$/%s' % module['keystorePath']
else:
keystore = ''
android_manifest = get_path_from_map(module, 'androidManifest', 'AndroidManifest.xml')
res_folder = get_path_from_map(module, 'resFolder', 'res')
asset_folder = get_path_from_map(module, 'assetFolder', 'assets')
is_library_project = module['isAndroidLibraryProject']
android_params = {
'android_manifest': android_manifest,
'res': res_folder,
'asset_folder': asset_folder,
'is_android_library_project': str(is_library_project).lower(),
'run_proguard': 'false',
'module_gen_path': get_path_from_map(module, 'moduleGenPath'),
'proguard_config': '/proguard.cfg',
'keystore': keystore,
'libs_path': '/%s' % module.get('nativeLibs', 'libs'),
'apk_path': get_path_from_map(module, 'binaryPath'),
}
if android_auto_generation_disabled:
android_params['enable_sources_autogeneration'] = 'false'
else:
android_params['enable_sources_autogeneration'] = 'true'
xml += ANDROID_FACET % android_params
# Source code and libraries component.
xml += '\n <component name="NewModuleRootManager" inherit-compiler-output="true">'
# Empirically, if there are multiple source folders, then the
# <content> element for the buck-out/android/gen folder should be
# listed before the other source folders.
num_source_folders = len(module['sourceFolders'])
if num_source_folders > 1 and module['hasAndroidFacet']:
xml = add_buck_android_source_folder(xml, module)
# Source folders.
xml += '\n <content url="file://$MODULE_DIR$">'
for source_folder in module['sourceFolders']:
if 'packagePrefix' in source_folder:
package_prefix = 'packagePrefix="%s" ' % source_folder['packagePrefix']
else:
package_prefix = ''
xml += '\n <sourceFolder url="%(url)s" isTestSource="%(is_test_source)s" %(package_prefix)s/>' % {
'url': source_folder['url'],
'is_test_source': str(source_folder['isTestSource']).lower(),
'package_prefix': package_prefix
}
for exclude_folder in module['excludeFolders']:
xml += '\n <excludeFolder url="%s" />' % exclude_folder['url']
for exclude_folder in sorted(additional_excludes[module['pathToImlFile']]):
normalized_dir = os.path.dirname(os.path.normpath(
module['pathToImlFile']))
xml += '\n <excludeFolder url="file://$MODULE_DIR$/%s" />' % os.path.relpath(exclude_folder, normalized_dir)
xml += '\n </content>'
xml = add_annotation_generated_source_folder(xml, module)
# Empirically, if there is one source folder, then the <content>
# element for the buck-out/android/gen folder should be listed after
# the other source folders.
if num_source_folders <= 1 and module['hasAndroidFacet']:
xml = add_buck_android_source_folder(xml, module)
# Dependencies.
dependencies = module['dependencies']
module_name = module['name']
# We need to filter out some of the modules in the dependency list:
# (1) The module may list itself as a dependency with scope="TEST",
# which is bad.
# (2) The module may list another module as a dependency with both
# COMPILE and TEST scopes, in which case the COMPILE scope should
# win.
# compile_dependencies will be the set of names of dependent modules
# that do not have scope="TEST"
compile_dependencies = filter(
lambda dep: dep['type'] == 'module' and
((not ('scope' in dep)) or dep['scope'] != 'TEST'),
dependencies)
compile_dependencies = map(
lambda dep: dep['moduleName'], compile_dependencies)
compile_dependencies = set(compile_dependencies)
# Filter dependencies to satisfy (1) and (2) defined above.
filtered_dependencies = []
for dep in dependencies:
if dep['type'] != 'module':
# Non-module dependencies should still be included.
filtered_dependencies.append(dep)
else:
# dep must be a module
dep_module_name = dep['moduleName']
if dep_module_name == module_name:
# Exclude self-references!
continue
elif 'scope' in dep and dep['scope'] == 'TEST':
# If this is a scope="TEST" module and the module is going
# to be included as a scope="COMPILE" module, then exclude
# it.
if not (dep_module_name in compile_dependencies):
filtered_dependencies.append(dep)
else:
# Non-test modules should still be included.
filtered_dependencies.append(dep)
# Now that we have filtered the dependencies, we can convert the
# remaining ones directly into XML.
excluded_deps_names = set()
if module_type == 'PLUGIN_MODULE':
# all the jars below are parts of IntelliJ SDK and even though they
# are required for language plugins to work standalone, they cannot
# be included as the plugin module dependency because they would
# clash with IntelliJ
excluded_deps_names = set([
'annotations', # org/intellij/lang/annotations, org/jetbrains/annotations
'extensions', # com/intellij/openapi/extensions/
'idea', # org/intellij, com/intellij
'jdom', # org/jdom
'junit', # junit/
'light_psi_all', # light psi library
'openapi', # com/intellij/openapi
'picocontainer', # org/picocontainer
'trove4j', # gnu/trove
'util', # com/intellij/util
])
for dep in filtered_dependencies:
if 'scope' in dep:
dep_scope = 'scope="%s" ' % dep['scope']
else:
dep_scope = ''
dep_type = dep['type']
if dep_type == 'library':
if dep['name'] in excluded_deps_names:
continue
xml += '\n <orderEntry type="library" exported="" %sname="%s" level="project" />' % (dep_scope, dep['name'])
elif dep_type == 'module':
dep_module_name = dep['moduleName']
# TODO(mbolin): Eliminate this special-case for jackson. It
# exists because jackson is not an ordinary module: it is a
# module that functions as a library. Project.java should add
# it as such in project.json to eliminate this special case.
if dep_module_name == 'module_first_party_orca_third_party_jackson':
exported = 'exported="" '
else:
exported = ''
xml += '\n <orderEntry type="module" module-name="%s" %s%s/>' % (dep_module_name, exported, dep_scope)
elif dep_type == 'inheritedJdk':
xml += '\n <orderEntry type="inheritedJdk" />'
elif dep_type == 'jdk':
xml += '\n <orderEntry type="jdk" jdkName="%s" jdkType="%s" />' % (dep['jdkName'], dep['jdkType'])
elif dep_type == 'sourceFolder':
xml += '\n <orderEntry type="sourceFolder" forTests="false" />'
# Close source code and libraries component.
xml += '\n </component>'
# Close XML.
xml += MODULE_XML_END
# Write the module to a file.
write_file_if_changed(module['pathToImlFile'], xml)
def add_buck_android_source_folder(xml, module):
# Apparently if we write R.java and friends to a gen/ directory under
# buck-out/android/ then IntelliJ wants that to be included as a separate
# source root.
if 'moduleGenPath' in module:
xml += '\n <content url="file://$MODULE_DIR$/%s">' % module['moduleGenPath']
xml += '\n <sourceFolder url="file://$MODULE_DIR$/%s" isTestSource="false" />'\
% module['moduleGenPath']
xml += '\n </content>'
if 'moduleRJavaPath' in module:
xml += '\n <content url="file://$MODULE_DIR$/%s">' % module['moduleRJavaPath']
xml += '\n <sourceFolder '
xml += 'url="file://$MODULE_DIR$/%s" ' % module['moduleRJavaPath']
xml += 'isTestSource="false" />'
xml += '\n </content>'
return xml
def add_annotation_generated_source_folder(xml, module):
if 'annotationGenPath' in module:
annotation_gen_is_for_test = ('annotationGenIsForTest' in module and
module['annotationGenIsForTest'])
is_test_source = str(annotation_gen_is_for_test).lower()
xml += '\n <content url="file://$MODULE_DIR$/%s">' % module['annotationGenPath']
xml += '\n <sourceFolder url="file://$MODULE_DIR$/%s" isTestSource="%s" />'\
% (module['annotationGenPath'], is_test_source)
xml += '\n </content>'
return xml
def write_all_modules(modules):
"""Writes a modules.xml file that defines all of the modules in the project."""
# Build up the XML.
xml = ALL_MODULES_XML_START
# Alpha-sort modules by path before writing them out.
# This ensures that the ordering within modules.xml is stable.
modules.sort(key=lambda module: module['pathToImlFile'])
for module in modules:
relative_path = module['pathToImlFile']
xml += '\n <module fileurl="file://$PROJECT_DIR$/%s" filepath="$PROJECT_DIR$/%s" %s/>' % (
relative_path,
relative_path,
'group="modules"' if not module['isRootModule'] else '')
xml += ALL_MODULES_XML_END
# Write the modules to a file.
write_file_if_changed('.idea/modules.xml', xml)
def write_misc_file(java_settings):
"""Writes a misc.xml file to define some settings specific to the project."""
output_url = '<output url="file://$PROJECT_DIR$/' + \
java_settings.get('outputUrl', 'build-ij/classes') + '" />'
xml = MISC_XML % {
'java_language_level': java_settings.get('languageLevel', 'JDK_1_6'),
'project_jdk_name': java_settings.get('jdkName', 'Android API 21 Platform'),
'project_jdk_type': java_settings.get('jdkType', 'Android SDK'),
'project_output_url': output_url
}
write_file_if_changed('.idea/misc.xml', xml)
def write_aars(aars):
"""Writes an XML file to define each prebuilt aar."""
mkdir_p('.idea/libraries')
for aar in aars:
# Build up the XML.
name = aar['name']
xml = AAR_XML_START % {
'name': name,
'binary_jar': aar['jar'],
}
if 'res' in aar:
xml += AAR_XML_RESOURCE % {'resource_path': aar['res']}
if 'assets' in aar:
xml += AAR_XML_RESOURCE % {'resource_path': aar['assets']}
xml += AAR_XML_END
# Write the library to a file
write_file_if_changed('.idea/libraries/%s.xml' % name, xml)
def write_libraries(libraries):
"""Writes an XML file to define each library."""
mkdir_p('.idea/libraries')
for library in libraries:
# Build up the XML.
name = library['name']
xml = LIBRARY_XML_START % {
'name': name,
'binary_jar': library['binaryJar'],
}
if 'javadocUrl' in library:
xml += LIBRARY_XML_WITH_JAVADOC % {
'javadoc_url': library['javadocUrl'],
}
else:
xml += LIBRARY_XML_NO_JAVADOC
if 'sourceJar' in library:
xml += LIBRARY_XML_WITH_SOURCES % {
'source_jar': library['sourceJar'],
}
else:
xml += LIBRARY_XML_NO_SOURCES
xml += LIBRARY_XML_END
# Write the library to a file
write_file_if_changed('.idea/libraries/%s.xml' % name, xml)
def write_run_configs():
"""Writes the run configurations that should be available"""
mkdir_p('.idea/runConfigurations')
xml = RUN_CONFIG_XML_START
xml += REMOTE_RUN_CONFIG_XML % {'name': "Debug Buck test"}
xml += RUN_CONFIG_XML_END
write_file_if_changed('.idea/runConfigurations/Debug_Buck_test.xml', xml)
def write_file_if_changed(path, content):
PROJECT_FILES.add(path)
if os.path.exists(path):
file_content_as_string = open(path, 'r').read()
needs_update = content.strip() != file_content_as_string.strip()
else:
needs_update = True
if needs_update:
out = open(path, 'wb')
out.write(content)
MODIFIED_FILES.append(path)
def mkdir_p(path):
"""Runs the equivalent of `mkdir -p`
Taken from http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
Args:
path: an absolute path
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
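# A note on the helper above: on Python 3.2+ the same "mkdir -p" behaviour is
# available directly (a minimal sketch, assuming a Python 3 interpreter):
#
#     os.makedirs(path, exist_ok=True)
#
# The explicit EEXIST handling is only needed because this script still runs
# under Python 2, where os.makedirs() has no exist_ok flag.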
def clean_old_files():
if os.path.isdir('.git'):
try:
files_to_clean = subprocess.check_output([
'git',
'ls-files',
'--other'])
for file_name in files_to_clean.splitlines():
if (file_name.endswith('.iml') and
file_name not in PROJECT_FILES):
os.remove(file_name)
return
except Exception as e:
pass
if __name__ == '__main__':
json_file = sys.argv[1]
generate_minimum_project = False
android_auto_generation_disabled = False
for key in sys.argv[2:]:
if key == '--generate_minimum_project':
generate_minimum_project = True
if key == '--disable_android_auto_generation_setting':
android_auto_generation_disabled = True
parsed_json = json.load(open(json_file, 'r'))
libraries = parsed_json['libraries']
write_libraries(libraries)
aars = parsed_json['aars']
write_aars(aars)
modules = parsed_json['modules']
write_modules(modules, generate_minimum_project, android_auto_generation_disabled)
write_all_modules(modules)
write_run_configs()
java_settings = parsed_json['java']
write_misc_file(java_settings)
# Write the list of modified files to stdout
for path in MODIFIED_FILES:
print path
print >> sys.stderr, (' :: Please resynchronize IntelliJ via File->Synchronize ' +
'or Cmd-Opt-Y (Mac) or Ctrl-Alt-Y (PC/Linux)')
| {
"content_hash": "bee94b9d5e846efa9e8a99adde98e4d1",
"timestamp": "",
"source": "github",
"line_count": 615,
"max_line_length": 127,
"avg_line_length": 37.60650406504065,
"alnum_prop": 0.577957454168108,
"repo_name": "1yvT0s/buck",
"id": "dad2c5cd993d0b6daabec8753490c75ca3b9b12a",
"size": "23128",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/com/facebook/buck/command/intellij.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "683"
},
{
"name": "C",
"bytes": "245769"
},
{
"name": "C++",
"bytes": "3765"
},
{
"name": "CSS",
"bytes": "54863"
},
{
"name": "D",
"bytes": "623"
},
{
"name": "Groff",
"bytes": "440"
},
{
"name": "HTML",
"bytes": "4938"
},
{
"name": "IDL",
"bytes": "128"
},
{
"name": "Java",
"bytes": "9977219"
},
{
"name": "JavaScript",
"bytes": "931262"
},
{
"name": "Lex",
"bytes": "2442"
},
{
"name": "Makefile",
"bytes": "1791"
},
{
"name": "Matlab",
"bytes": "47"
},
{
"name": "OCaml",
"bytes": "2956"
},
{
"name": "Objective-C",
"bytes": "67487"
},
{
"name": "Objective-C++",
"bytes": "34"
},
{
"name": "PowerShell",
"bytes": "143"
},
{
"name": "Python",
"bytes": "197714"
},
{
"name": "Rust",
"bytes": "938"
},
{
"name": "Shell",
"bytes": "30301"
},
{
"name": "Smalltalk",
"bytes": "438"
},
{
"name": "Yacc",
"bytes": "323"
}
],
"symlink_target": ""
} |
import shlex
import subprocess
import time
import pytest
import fsspec
pytest.importorskip("paramiko")
def stop_docker(name):
cmd = shlex.split('docker ps -a -q --filter "name=%s"' % name)
cid = subprocess.check_output(cmd).strip().decode()
if cid:
subprocess.call(["docker", "rm", "-f", cid])
@pytest.fixture(scope="module")
def ssh():
try:
subprocess.check_call(["docker", "run", "hello-world"])
except (subprocess.CalledProcessError, FileNotFoundError):
pytest.skip("docker run not available")
return
# requires docker
cmds = [
r"apt-get update",
r"apt-get install -y openssh-server",
r"mkdir /var/run/sshd",
"bash -c \"echo 'root:pass' | chpasswd\"",
(
r"sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin yes/' "
r"/etc/ssh/sshd_config"
),
(
r"sed 's@session\s*required\s*pam_loginuid.so@session optional "
r"pam_loginuid.so@g' -i /etc/pam.d/sshd"
),
r'bash -c "echo \"export VISIBLE=now\" >> /etc/profile"',
r"/usr/sbin/sshd",
]
name = "fsspec_sftp"
stop_docker(name)
cmd = "docker run -d -p 9200:22 --name {} ubuntu:16.04 sleep 9000".format(name)
cid = subprocess.check_output(shlex.split(cmd)).strip().decode()
for cmd in cmds:
subprocess.call(["docker", "exec", cid] + shlex.split(cmd))
try:
time.sleep(1)
yield dict(host="localhost", port=9200, username="root", password="pass")
finally:
stop_docker(name)
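# The dict yielded by the fixture above maps straight onto the sftp
# filesystem's constructor arguments (they end up as paramiko connection
# kwargs). A minimal standalone sketch against the same container; the
# host/port/credentials are the values hard-coded in the fixture, not a
# public server:
#
#     fs = fsspec.filesystem(
#         "sftp", host="localhost", port=9200, username="root", password="pass"
#     )
#     fs.ls("/home")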
def test_simple(ssh):
f = fsspec.get_filesystem_class("sftp")(**ssh)
f.mkdirs("/home/someuser/deeper")
f.touch("/home/someuser/deeper/afile")
assert f.find("/home/someuser") == ["/home/someuser/deeper/afile"]
assert f.ls("/home/someuser/deeper/") == ["/home/someuser/deeper/afile"]
assert f.info("/home/someuser/deeper/afile")["type"] == "file"
assert f.info("/home/someuser/deeper/afile")["size"] == 0
assert f.exists("/home/someuser")
f.rm("/home/someuser", recursive=True)
assert not f.exists("/home/someuser")
@pytest.mark.parametrize("protocol", ["sftp", "ssh"])
def test_with_url(protocol, ssh):
fo = fsspec.open(
protocol + "://{username}:{password}@{host}:{port}"
"/home/someuserout".format(**ssh),
"wb",
)
with fo as f:
f.write(b"hello")
fo = fsspec.open(
protocol + "://{username}:{password}@{host}:{port}"
"/home/someuserout".format(**ssh),
"rb",
)
with fo as f:
assert f.read() == b"hello"
def test_transaction(ssh):
f = fsspec.get_filesystem_class("sftp")(**ssh)
f.mkdirs("/home/someuser/deeper")
f.start_transaction()
f.touch("/home/someuser/deeper/afile")
assert f.find("/home/someuser") == []
f.end_transaction()
f.find("/home/someuser") == ["/home/someuser/deeper/afile"]
with f.transaction:
assert f._intrans
f.touch("/home/someuser/deeper/afile2")
assert f.find("/home/someuser") == ["/home/someuser/deeper/afile"]
assert f.find("/home/someuser") == [
"/home/someuser/deeper/afile",
"/home/someuser/deeper/afile2",
]
def test_makedirs_exist_ok(ssh):
f = fsspec.get_filesystem_class("sftp")(**ssh)
f.makedirs("/a/b/c")
with pytest.raises(FileExistsError, match="/a/b/c"):
f.makedirs("/a/b/c", exist_ok=False)
f.makedirs("/a/b/c", exist_ok=True)
| {
"content_hash": "e9b3c5d9983255bdddce68f347607890",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 83,
"avg_line_length": 30.36521739130435,
"alnum_prop": 0.5947880870561283,
"repo_name": "intake/filesystem_spec",
"id": "a0ba10d825dcf58b880e0f324a47acbdf8215190",
"size": "3492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fsspec/implementations/tests/test_sftp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "526138"
}
],
"symlink_target": ""
} |
from silverflask import db
from tests.test_base import BaseTest
from silverflask.models import DataObject
from silverflask.mixins import OrderableMixin
class OrderedItem(OrderableMixin, DataObject, db.Model):
def __repr__(self):
return "<OrderedItem id: %s, sort_order: %s>" % (self.id, self.sort_order)
class TestOrdering(BaseTest):
def teardown(self):
pass
# db.drop_all()
# db.session.remove()
def test_ordering(self):
o1 = OrderedItem()
db.session.add(o1)
db.session.commit()
o2 = OrderedItem()
db.session.add(o2)
db.session.commit()
print(OrderedItem.singular_name, OrderedItem.plural_name)
assert OrderedItem.default_order is not None
assert(o1.sort_order == 1)
assert(o2.sort_order == 2)
o1.insert_after(o2.sort_order)
db.session.commit()
print(o1.sort_order)
assert(o1.sort_order == 3)
print(OrderedItem.query.all())
assert self.deepequal(OrderedItem.query.all(), [o2, o1])
o2.insert_after(o1.sort_order)
db.session.commit()
assert self.deepequal(OrderedItem.query.all(), [o1, o2])
o3 = OrderedItem()
db.session.add(o3)
db.session.commit()
assert self.deepequal(OrderedItem.query.all(), [o1, o2, o3])
o3.insert_after(0)
db.session.commit()
assert self.deepequal(OrderedItem.query.all(), [o3, o1, o2])
| {
"content_hash": "430f7bb8768d647132b0fad8caf059e2",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 82,
"avg_line_length": 30.020408163265305,
"alnum_prop": 0.619986403806934,
"repo_name": "wolfv/SilverFlask",
"id": "66308a2d0d5e83eb712d181559291c9d0e4a8ac5",
"size": "1516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_ordering.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "246976"
},
{
"name": "CoffeeScript",
"bytes": "6611"
},
{
"name": "HTML",
"bytes": "65209"
},
{
"name": "JavaScript",
"bytes": "2602485"
},
{
"name": "Makefile",
"bytes": "366"
},
{
"name": "Python",
"bytes": "130958"
},
{
"name": "Scheme",
"bytes": "5830"
}
],
"symlink_target": ""
} |
from django import forms
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db import models
from django.db.models.base import ModelBase
from django.forms.forms import DeclarativeFieldsMetaclass
from django.forms.utils import flatatt
from django.template import loader
from django.http import Http404
from django.template.context import RequestContext
from django.test.client import RequestFactory
from django.utils.encoding import force_unicode, smart_unicode
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.http import urlencode, urlquote
from django.views.decorators.cache import never_cache
from nadmin import widgets as exwidgets
from nadmin.layout import FormHelper
from nadmin.models import UserSettings, UserWidget
from nadmin.sites import site
from nadmin.views.base import CommAdminView, ModelAdminView, filter_hook, csrf_protect_m
from nadmin.views.edit import CreateAdminView
from nadmin.views.list import ListAdminView
from nadmin.util import unquote
import copy
class WidgetTypeSelect(forms.Widget):
def __init__(self, widgets, attrs=None):
super(WidgetTypeSelect, self).__init__(attrs)
self._widgets = widgets
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
final_attrs['class'] = 'nav nav-pills nav-stacked'
output = [u'<ul%s>' % flatatt(final_attrs)]
options = self.render_options(force_unicode(value), final_attrs['id'])
if options:
output.append(options)
output.append(u'</ul>')
output.append('<input type="hidden" id="%s_input" name="%s" value="%s"/>' %
(final_attrs['id'], name, force_unicode(value)))
return mark_safe(u'\n'.join(output))
def render_option(self, selected_choice, widget, id):
if widget.widget_type == selected_choice:
selected_html = u' class="active"'
else:
selected_html = ''
return (u'<li%s><a onclick="' +
'javascript:$(this).parent().parent().find(\'>li\').removeClass(\'active\');$(this).parent().addClass(\'active\');' +
'$(\'#%s_input\').attr(\'value\', \'%s\')' % (id, widget.widget_type) +
'"><h4><i class="%s"></i> %s</h4><p>%s</p></a></li>') % (
selected_html,
widget.widget_icon,
widget.widget_title or widget.widget_type,
widget.description)
def render_options(self, selected_choice, id):
# Normalize to strings.
output = []
for widget in self._widgets:
output.append(self.render_option(selected_choice, widget, id))
return u'\n'.join(output)
class UserWidgetAdmin(object):
model_icon = 'fa fa-dashboard'
list_display = ('widget_type', 'page_id', 'user')
list_filter = ['user', 'widget_type', 'page_id']
list_display_links = ('widget_type',)
user_fields = ['user']
hidden_menu = True
wizard_form_list = (
(_(u"Widget Type"), ('page_id', 'widget_type')),
(_(u"Widget Params"), {'callback':
"get_widget_params_form", 'convert': "convert_widget_params"})
)
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name == 'widget_type':
widgets = widget_manager.get_widgets(self.request.GET.get('page_id', ''))
form_widget = WidgetTypeSelect(widgets)
return forms.ChoiceField(choices=[(w.widget_type, w.description) for w in widgets],
widget=form_widget, label=_('Widget Type'))
if 'page_id' in self.request.GET and db_field.name == 'page_id':
kwargs['widget'] = forms.HiddenInput
field = super(
UserWidgetAdmin, self).formfield_for_dbfield(db_field, **kwargs)
return field
def get_widget_params_form(self, wizard):
data = wizard.get_cleaned_data_for_step(wizard.steps.first)
widget_type = data['widget_type']
widget = widget_manager.get(widget_type)
fields = copy.deepcopy(widget.base_fields)
if 'id' in fields:
del fields['id']
return DeclarativeFieldsMetaclass("WidgetParamsForm", (forms.Form,), fields)
def convert_widget_params(self, wizard, cleaned_data, form):
widget = UserWidget()
value = dict([(f.name, f.value()) for f in form])
widget.set_value(value)
cleaned_data['value'] = widget.value
cleaned_data['user'] = self.user
def get_list_display(self):
list_display = super(UserWidgetAdmin, self).get_list_display()
if not self.user.is_superuser:
list_display.remove('user')
return list_display
def queryset(self):
if self.user.is_superuser:
return super(UserWidgetAdmin, self).queryset()
return UserWidget.objects.filter(user=self.user)
def update_dashboard(self, obj):
try:
portal_pos = UserSettings.objects.get(
user=obj.user, key="dashboard:%s:pos" % obj.page_id)
except UserSettings.DoesNotExist:
return
pos = [[w for w in col.split(',') if w != str(
obj.id)] for col in portal_pos.value.split('|')]
portal_pos.value = '|'.join([','.join(col) for col in pos])
portal_pos.save()
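    # The "dashboard:<page_id>:pos" UserSettings value is a small hand-rolled
    # format: columns are separated by '|' and widget ids inside a column by
    # ','. For example (illustrative ids only), the value "3,7|12" means two
    # columns, widgets 3 and 7 in the first and widget 12 in the second. The
    # comprehension above just drops the deleted widget's id from whichever
    # column holds it and re-joins the string.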
def delete_model(self):
self.update_dashboard(self.obj)
super(UserWidgetAdmin, self).delete_model()
def delete_models(self, queryset):
for obj in queryset:
self.update_dashboard(obj)
super(UserWidgetAdmin, self).delete_models(queryset)
site.register(UserWidget, UserWidgetAdmin)
class WidgetManager(object):
_widgets = None
def __init__(self):
self._widgets = {}
def register(self, widget_class):
self._widgets[widget_class.widget_type] = widget_class
return widget_class
def get(self, name):
return self._widgets[name]
def get_widgets(self, page_id):
return self._widgets.values()
widget_manager = WidgetManager()
class WidgetDataError(Exception):
def __init__(self, widget, errors):
super(WidgetDataError, self).__init__(str(errors))
self.widget = widget
self.errors = errors
class BaseWidget(forms.Form):
template = 'nadmin/widgets/base.html'
description = 'Base Widget, don\'t use it.'
widget_title = None
widget_icon = 'fa fa-plus-square'
widget_type = 'base'
base_title = None
id = forms.IntegerField(label=_('Widget ID'), widget=forms.HiddenInput)
title = forms.CharField(label=_('Widget Title'), required=False, widget=exwidgets.AdminTextInputWidget)
def __init__(self, dashboard, data):
self.dashboard = dashboard
self.admin_site = dashboard.admin_site
self.request = dashboard.request
self.user = dashboard.request.user
self.convert(data)
super(BaseWidget, self).__init__(data)
if not self.is_valid():
raise WidgetDataError(self, self.errors.as_text())
self.setup()
def setup(self):
helper = FormHelper()
helper.form_tag = False
self.helper = helper
self.id = self.cleaned_data['id']
self.title = self.cleaned_data['title'] or self.base_title
if not (self.user.is_superuser or self.has_perm()):
raise PermissionDenied
@property
def widget(self):
context = {'widget_id': self.id, 'widget_title': self.title, 'widget_icon': self.widget_icon,
'widget_type': self.widget_type, 'form': self, 'widget': self}
self.context(context)
return loader.render_to_string(self.template, context, context_instance=RequestContext(self.request))
def context(self, context):
pass
def convert(self, data):
pass
def has_perm(self):
return False
def save(self):
value = dict([(f.name, f.value()) for f in self])
user_widget = UserWidget.objects.get(id=self.id)
user_widget.set_value(value)
user_widget.save()
def static(self, path):
return self.dashboard.static(path)
def vendor(self, *tags):
return self.dashboard.vendor(*tags)
def media(self):
return forms.Media()
@widget_manager.register
class HtmlWidget(BaseWidget):
widget_type = 'html'
widget_icon = 'fa fa-file-o'
description = _(
u'Html Content Widget, can write any html content in widget.')
content = forms.CharField(label=_(
'Html Content'), widget=exwidgets.AdminTextareaWidget, required=False)
def has_perm(self):
return True
def context(self, context):
context['content'] = self.cleaned_data['content']
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
def __iter__(self):
from nadmin import site as g_admin_site
for m, ma in g_admin_site._registry.items():
yield ('%s.%s' % (m._meta.app_label, m._meta.model_name),
m._meta.verbose_name)
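# ModelChoiceIterator yields one (value, label) pair per model registered on
# the global admin site, with the value in "app_label.model_name" form; for a
# hypothetical blog app that might be ("blog.post", u"post"). That string is
# exactly what ModelChoiceField.to_python() below splits on '.' to resolve
# the model class again.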
class ModelChoiceField(forms.ChoiceField):
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text=None, *args, **kwargs):
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
forms.Field.__init__(self, required, widget, label, initial, help_text,
*args, **kwargs)
self.widget.choices = self.choices
def __deepcopy__(self, memo):
result = forms.Field.__deepcopy__(self, memo)
return result
def _get_choices(self):
return ModelChoiceIterator(self)
choices = property(_get_choices, forms.ChoiceField._set_choices)
def to_python(self, value):
if isinstance(value, ModelBase):
return value
app_label, model_name = value.lower().split('.')
return models.get_model(app_label, model_name)
def prepare_value(self, value):
if isinstance(value, ModelBase):
value = '%s.%s' % (value._meta.app_label, value._meta.model_name)
return value
def valid_value(self, value):
value = self.prepare_value(value)
for k, v in self.choices:
if value == smart_unicode(k):
return True
return False
class ModelBaseWidget(BaseWidget):
app_label = None
model_name = None
model_perm = 'change'
model = ModelChoiceField(label=_(u'Target Model'), widget=exwidgets.AdminSelectWidget)
def __init__(self, dashboard, data):
self.dashboard = dashboard
super(ModelBaseWidget, self).__init__(dashboard, data)
def setup(self):
self.model = self.cleaned_data['model']
self.app_label = self.model._meta.app_label
self.model_name = self.model._meta.model_name
super(ModelBaseWidget, self).setup()
def has_perm(self):
return self.dashboard.has_model_perm(self.model, self.model_perm)
def filte_choices_model(self, model, modeladmin):
return self.dashboard.has_model_perm(model, self.model_perm)
def model_admin_url(self, name, *args, **kwargs):
return reverse(
"%s:%s_%s_%s" % (self.admin_site.app_name, self.app_label,
self.model_name, name), args=args, kwargs=kwargs)
class PartialBaseWidget(BaseWidget):
def get_view_class(self, view_class, model=None, **opts):
admin_class = self.admin_site._registry.get(model) if model else None
return self.admin_site.get_view_class(view_class, admin_class, **opts)
def get_factory(self):
return RequestFactory()
def setup_request(self, request):
request.user = self.user
request.session = self.request.session
return request
def make_get_request(self, path, data={}, **extra):
req = self.get_factory().get(path, data, **extra)
return self.setup_request(req)
def make_post_request(self, path, data={}, **extra):
req = self.get_factory().post(path, data, **extra)
return self.setup_request(req)
@widget_manager.register
class QuickBtnWidget(BaseWidget):
widget_type = 'qbutton'
description = _(u'Quick button Widget, quickly open any page.')
template = "nadmin/widgets/qbutton.html"
base_title = _(u"Quick Buttons")
widget_icon = 'fa fa-caret-square-o-right'
def convert(self, data):
self.q_btns = data.pop('btns', [])
def get_model(self, model_or_label):
if isinstance(model_or_label, ModelBase):
return model_or_label
else:
return models.get_model(*model_or_label.lower().split('.'))
def context(self, context):
btns = []
for b in self.q_btns:
btn = {}
if 'model' in b:
model = self.get_model(b['model'])
if not self.user.has_perm("%s.view_%s" % (model._meta.app_label, model._meta.model_name)):
continue
btn['url'] = reverse("%s:%s_%s_%s" % (self.admin_site.app_name, model._meta.app_label,
model._meta.model_name, b.get('view', 'changelist')))
btn['title'] = model._meta.verbose_name
btn['icon'] = self.dashboard.get_model_icon(model)
else:
try:
btn['url'] = reverse(b['url'])
except NoReverseMatch:
btn['url'] = b['url']
if 'title' in b:
btn['title'] = b['title']
if 'icon' in b:
btn['icon'] = b['icon']
btns.append(btn)
context.update({'btns': btns})
def has_perm(self):
return True
@widget_manager.register
class ListWidget(ModelBaseWidget, PartialBaseWidget):
widget_type = 'list'
description = _(u'Any Objects list Widget.')
template = "nadmin/widgets/list.html"
model_perm = 'view'
widget_icon = 'fa fa-align-justify'
def convert(self, data):
self.list_params = data.pop('params', {})
self.list_count = data.pop('count', 10)
def setup(self):
super(ListWidget, self).setup()
if not self.title:
self.title = self.model._meta.verbose_name_plural
req = self.make_get_request("", self.list_params)
self.list_view = self.get_view_class(ListAdminView, self.model)(req)
if self.list_count:
self.list_view.list_per_page = self.list_count
def context(self, context):
list_view = self.list_view
list_view.make_result_list()
base_fields = list_view.base_list_display
if len(base_fields) > 5:
base_fields = base_fields[0:5]
context['result_headers'] = [c for c in list_view.result_headers(
).cells if c.field_name in base_fields]
context['results'] = [[o for i, o in
enumerate(filter(lambda c:c.field_name in base_fields, r.cells))]
for r in list_view.results()]
context['result_count'] = list_view.result_count
context['page_url'] = self.model_admin_url('changelist') + "?" + urlencode(self.list_params)
@widget_manager.register
class AddFormWidget(ModelBaseWidget, PartialBaseWidget):
widget_type = 'addform'
description = _(u'Add any model object Widget.')
template = "nadmin/widgets/addform.html"
model_perm = 'add'
widget_icon = 'fa fa-plus'
def setup(self):
super(AddFormWidget, self).setup()
if self.title is None:
self.title = _('Add %s') % self.model._meta.verbose_name
req = self.make_get_request("")
self.add_view = self.get_view_class(
CreateAdminView, self.model, list_per_page=10)(req)
self.add_view.instance_forms()
def context(self, context):
helper = FormHelper()
helper.form_tag = False
context.update({
'addform': self.add_view.form_obj,
'addhelper': helper,
'addurl': self.add_view.model_admin_url('add'),
'model': self.model
})
def media(self):
return self.add_view.media + self.add_view.form_obj.media + self.vendor('nadmin.plugin.quick-form.js')
class Dashboard(CommAdminView):
widget_customiz = True
widgets = []
title = _(u"Dashboard")
icon = None
def get_page_id(self):
return self.request.path
def get_portal_key(self):
return "dashboard:%s:pos" % self.get_page_id()
@filter_hook
def get_widget(self, widget_or_id, data=None):
try:
if isinstance(widget_or_id, UserWidget):
widget = widget_or_id
else:
widget = UserWidget.objects.get(user=self.user, page_id=self.get_page_id(), id=widget_or_id)
wid = widget_manager.get(widget.widget_type)
class widget_with_perm(wid):
def context(self, context):
super(widget_with_perm, self).context(context)
context.update({'has_change_permission': self.request.user.has_perm('nadmin.change_userwidget')})
wid_instance = widget_with_perm(self, data or widget.get_value())
return wid_instance
except UserWidget.DoesNotExist:
return None
@filter_hook
def get_init_widget(self):
portal = []
widgets = self.widgets
for col in widgets:
portal_col = []
for opts in col:
try:
widget = UserWidget(user=self.user, page_id=self.get_page_id(), widget_type=opts['type'])
widget.set_value(opts)
widget.save()
portal_col.append(self.get_widget(widget))
except (PermissionDenied, WidgetDataError):
widget.delete()
continue
portal.append(portal_col)
UserSettings(
user=self.user, key="dashboard:%s:pos" % self.get_page_id(),
value='|'.join([','.join([str(w.id) for w in col]) for col in portal])).save()
return portal
@filter_hook
def get_widgets(self):
if self.widget_customiz:
portal_pos = UserSettings.objects.filter(
user=self.user, key=self.get_portal_key())
if len(portal_pos):
portal_pos = portal_pos[0].value
widgets = []
if portal_pos:
user_widgets = dict([(uw.id, uw) for uw in UserWidget.objects.filter(user=self.user, page_id=self.get_page_id())])
for col in portal_pos.split('|'):
ws = []
for wid in col.split(','):
try:
widget = user_widgets.get(int(wid))
if widget:
ws.append(self.get_widget(widget))
except Exception, e:
import logging
logging.error(e, exc_info=True)
widgets.append(ws)
return widgets
return self.get_init_widget()
@filter_hook
def get_title(self):
return self.title
@filter_hook
def get_context(self):
new_context = {
'title': self.get_title(),
'icon': self.icon,
'portal_key': self.get_portal_key(),
'columns': [('col-sm-%d' % int(12 / len(self.widgets)), ws) for ws in self.widgets],
'has_add_widget_permission': self.has_model_perm(UserWidget, 'add') and self.widget_customiz,
'add_widget_url': self.get_admin_url('%s_%s_add' % (UserWidget._meta.app_label, UserWidget._meta.model_name)) +
"?user=%s&page_id=%s&_redirect=%s" % (self.user.id, self.get_page_id(), urlquote(self.request.get_full_path()))
}
context = super(Dashboard, self).get_context()
context.update(new_context)
return context
@never_cache
def get(self, request, *args, **kwargs):
self.widgets = self.get_widgets()
return self.template_response('nadmin/views/dashboard.html', self.get_context())
@csrf_protect_m
def post(self, request, *args, **kwargs):
if 'id' in request.POST:
widget_id = request.POST['id']
if request.POST.get('_delete', None) != 'on':
widget = self.get_widget(widget_id, request.POST.copy())
widget.save()
else:
try:
widget = UserWidget.objects.get(
user=self.user, page_id=self.get_page_id(), id=widget_id)
widget.delete()
try:
portal_pos = UserSettings.objects.get(user=self.user, key="dashboard:%s:pos" % self.get_page_id())
pos = [[w for w in col.split(',') if w != str(
widget_id)] for col in portal_pos.value.split('|')]
portal_pos.value = '|'.join([','.join(col) for col in pos])
portal_pos.save()
except Exception:
pass
except UserWidget.DoesNotExist:
pass
return self.get(request)
@filter_hook
def get_media(self):
media = super(Dashboard, self).get_media() + \
self.vendor('nadmin.page.dashboard.js', 'nadmin.page.dashboard.css')
if self.widget_customiz:
media = media + self.vendor('nadmin.plugin.portal.js')
for ws in self.widgets:
for widget in ws:
media = media + widget.media()
return media
class ModelDashboard(Dashboard, ModelAdminView):
title = _(u"%s Dashboard")
def get_page_id(self):
return 'model:%s/%s' % self.model_info
@filter_hook
def get_title(self):
return self.title % force_unicode(self.obj)
def init_request(self, object_id, *args, **kwargs):
self.obj = self.get_object(unquote(object_id))
if not self.has_view_permission(self.obj):
raise PermissionDenied
if self.obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_unicode(self.opts.verbose_name), 'key': escape(object_id)})
@filter_hook
def get_context(self):
new_context = {
'has_change_permission': self.has_change_permission(self.obj),
'object': self.obj,
}
context = Dashboard.get_context(self)
context.update(ModelAdminView.get_context(self))
context.update(new_context)
return context
@never_cache
def get(self, request, *args, **kwargs):
self.widgets = self.get_widgets()
return self.template_response(self.get_template_list('views/model_dashboard.html'), self.get_context())
| {
"content_hash": "be9fd7816ae3fdd9c6ee55fad99191a3",
"timestamp": "",
"source": "github",
"line_count": 662,
"max_line_length": 134,
"avg_line_length": 35.26737160120846,
"alnum_prop": 0.584314901272112,
"repo_name": "A425/django-nadmin",
"id": "1ee2bd7184785b8279a087e6b4e5da59e73277c6",
"size": "23347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nadmin/views/dashboard.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23733"
},
{
"name": "HTML",
"bytes": "95746"
},
{
"name": "JavaScript",
"bytes": "66338"
},
{
"name": "Python",
"bytes": "413023"
}
],
"symlink_target": ""
} |
import logging
import flask
import flask_cors
from docker_registry.core import compat
from docker_registry.core import exceptions
json = compat.json
from . import storage
from . import toolkit
from .lib import mirroring
from .lib import signals
from .app import app
store = storage.load()
logger = logging.getLogger(__name__)
"""Those routes are loaded only when `standalone' is enabled in the config
file. The goal is to make the Registry work without the central Index.
It's then possible to push images from Docker without talking to any other
entities. This module mimics the Index.
"""
def generate_headers(namespace, repository, access):
registry_endpoints = toolkit.get_endpoints()
# The token generated will be invalid against a real Index behind.
token = 'Token signature={0},repository="{1}/{2}",access={3}'.format(
toolkit.gen_random_string(), namespace, repository, access)
return {'X-Docker-Endpoints': registry_endpoints,
'WWW-Authenticate': token,
'X-Docker-Token': token}
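# For illustration only: with namespace "library", repository "busybox" and
# access "read", the token built above has the shape (the signature part is
# random):
#
#   Token signature=abcdefgh,repository="library/busybox",access=read
#
# The same string is returned as both WWW-Authenticate and X-Docker-Token,
# which is what docker clients are given when the registry runs standalone.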
@app.route('/v1/users', methods=['GET', 'POST'])
@app.route('/v1/users/', methods=['GET', 'POST'])
def get_post_users():
if flask.request.method == 'GET':
return toolkit.response('OK', 200)
try:
# Note(dmp): unicode patch
json.loads(flask.request.data.decode('utf8'))
except ValueError:
return toolkit.api_error('Error Decoding JSON', 400)
return toolkit.response('User Created', 201)
@app.route('/v1/users/<username>/', methods=['PUT'])
def put_username(username):
return toolkit.response('', 204)
def update_index_images(namespace, repository, data):
path = store.index_images_path(namespace, repository)
sender = flask.current_app._get_current_object()
try:
images = {}
# Note(dmp): unicode patch
data = json.loads(data.decode('utf8')) + store.get_json(path)
for i in data:
iid = i['id']
if iid in images and 'checksum' in images[iid]:
continue
i_data = {'id': iid}
for key in ['checksum']:
if key in i:
i_data[key] = i[key]
images[iid] = i_data
data = images.values()
# Note(dmp): unicode patch
store.put_json(path, data)
signals.repository_updated.send(
sender, namespace=namespace, repository=repository, value=data)
except exceptions.FileNotFoundError:
signals.repository_created.send(
sender, namespace=namespace, repository=repository,
# Note(dmp): unicode patch
value=json.loads(data.decode('utf8')))
store.put_content(path, data)
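# The payload merged above is the image list docker pushes for a repository:
# a JSON array of objects keyed by image id, each optionally carrying a
# checksum, e.g. (ids shortened for illustration):
#
#   [{"id": "d3adbeef...", "checksum": "sha256:..."}, {"id": "cafebabe..."}]
#
# For a given id, a record that already carries a checksum is kept in
# preference to one that does not.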
@app.route('/v1/repositories/<path:repository>', methods=['PUT'])
@app.route('/v1/repositories/<path:repository>/images',
defaults={'images': True},
methods=['PUT'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def put_repository(namespace, repository, images=False):
data = None
try:
# Note(dmp): unicode patch
data = json.loads(flask.request.data.decode('utf8'))
except ValueError:
return toolkit.api_error('Error Decoding JSON', 400)
if not isinstance(data, list):
return toolkit.api_error('Invalid data')
update_index_images(namespace, repository, flask.request.data)
headers = generate_headers(namespace, repository, 'write')
code = 204 if images is True else 200
return toolkit.response('', code, headers)
@app.route('/v1/repositories/<path:repository>/images', methods=['GET'])
@flask_cors.cross_origin(methods=['GET']) # allow all origins (*)
@toolkit.parse_repository_name
@toolkit.requires_auth
@mirroring.source_lookup(index_route=True)
def get_repository_images(namespace, repository):
data = None
try:
path = store.index_images_path(namespace, repository)
data = store.get_content(path)
except exceptions.FileNotFoundError:
return toolkit.api_error('images not found', 404)
headers = generate_headers(namespace, repository, 'read')
return toolkit.response(data, 200, headers, True)
@app.route('/v1/repositories/<path:repository>/images', methods=['DELETE'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def delete_repository_images(namespace, repository):
    # Does nothing; the index file is removed when DELETE is called on the repository
headers = generate_headers(namespace, repository, 'delete')
return toolkit.response('', 204, headers)
@app.route('/v1/repositories/<path:repository>/auth', methods=['PUT'])
@toolkit.parse_repository_name
def put_repository_auth(namespace, repository):
return toolkit.response('OK')
| {
"content_hash": "3df1fbb73d881da53f5c0d9e45c13553",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 77,
"avg_line_length": 35.08955223880597,
"alnum_prop": 0.6695023394300298,
"repo_name": "nicescale/docker-registry",
"id": "3737bd8a9c823abd77b12e73fa3d8c7390399d20",
"size": "4727",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docker_registry/index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import os
import sys
from airflow import configuration as conf
from importlib import import_module
def prepare_classpath():
"""
    Ensures that the config directory under AIRFLOW_HOME is added to sys.path
"""
config_path = os.path.join(conf.get('core', 'airflow_home'), 'config')
config_path = os.path.expanduser(config_path)
if config_path not in sys.path:
sys.path.append(config_path)
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
raise ImportError("{} doesn't look like a module path".format(dotted_path))
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
raise ImportError('Module "{}" does not define a "{}" attribute/class'.format(
module_path, class_name)
)
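# A minimal usage sketch (the standard-library target is chosen purely for
# illustration): the dotted path is split on its last dot, the module part is
# imported, and the final name is looked up as an attribute.
#
#     join = import_string('os.path.join')
#     assert join('a', 'b') == os.path.join('a', 'b')
#
# A path without a dot, or a missing attribute, is reported as ImportError.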
| {
"content_hash": "ba62f085e84a75b5719bbd106bd33018",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 86,
"avg_line_length": 28.77777777777778,
"alnum_prop": 0.665057915057915,
"repo_name": "adamhaney/airflow",
"id": "51c6adf4e642c3f8f90dcd4e543c3440cced0c1f",
"size": "1847",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "airflow/utils/module_loading.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "3602"
},
{
"name": "HTML",
"bytes": "129737"
},
{
"name": "JavaScript",
"bytes": "22091"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5635329"
},
{
"name": "Shell",
"bytes": "41790"
}
],
"symlink_target": ""
} |
class MPNeuron(object):
def __init__(self, threshold, inputs):
self.threshold = threshold
self.inputs = inputs
def activate(self):
excitations = 0
for trigger in self.inputs:
if trigger.excitatory:
excitations += trigger.value
else:
if trigger.value:
return 0
if excitations >= self.threshold:
return 1
return 0
class MPInput(object):
def __init__(self, excitatory):
self.excitatory = excitatory
self.value = 0
def trigger(self, value):
self.value = value
class Decoder(object):
def __init__(self, vectors):
self.vectors = vectors
self.vec_length = len(self.vectors[0])
        assert all(len(vec) == self.vec_length for vec in vectors)
def decode(self):
decoder_units = []
for vector in self.vectors:
threshold = sum(vector)
inputs = []
for i in range(self.vec_length):
if vector[i] == 1:
inputs.append(MPInput(True))
else:
inputs.append(MPInput(False))
gate = MPNeuron(threshold, inputs)
decoder_units.append(gate)
def decoder(*args):
for i in range(self.vec_length):
inputs[i].trigger(args[i])
decoder_units.reverse()
or_arg = decoder_units[0].activate()
for unit in decoder_units:
for i in range(self.vec_length):
unit.inputs[i].trigger(args[i])
val = unit.activate()
or_arg = OR(or_arg, val)
return or_arg
return decoder
def AND(x1, x2):
inputs = [MPInput(True), MPInput(True)]
gate = MPNeuron(2, inputs)
inputs[0].trigger(x1)
inputs[1].trigger(x2)
return gate.activate()
def OR(x1, x2):
inputs = [MPInput(True), MPInput(True)]
gate = MPNeuron(1, inputs)
inputs[0].trigger(x1)
inputs[1].trigger(x2)
return gate.activate()
def NOT(x):
inputs = [MPInput(False)]
gate = MPNeuron(0, inputs)
inputs[0].trigger(x)
return gate.activate()
def XOR(x1, x2):
return AND(OR(x1, x2),
NOT(AND(x1, x2)))
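# A quick illustrative sanity check of the gate helpers above (not part of
# any test suite). Each helper wires excitatory/inhibitory MPInput triggers
# into an MPNeuron and fires it once per call:
#
#     assert AND(1, 1) == 1 and AND(1, 0) == 0
#     assert OR(0, 0) == 0 and OR(1, 0) == 1
#     assert NOT(0) == 1 and NOT(1) == 0
#     assert [XOR(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 1, 1, 0]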
| {
"content_hash": "ed724017f0440cab5945840c21c01854",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 62,
"avg_line_length": 28.37037037037037,
"alnum_prop": 0.5313315926892951,
"repo_name": "mcneela/Retina",
"id": "36d5e67fcc7fa33cbd16cc2c743cf195f0bff33d",
"size": "2298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "retina/mlearn/mcculloch-pitts/mcpitts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2076479"
},
{
"name": "HTML",
"bytes": "13176"
},
{
"name": "JavaScript",
"bytes": "2379445"
},
{
"name": "Jupyter Notebook",
"bytes": "1620710"
},
{
"name": "Python",
"bytes": "142775"
}
],
"symlink_target": ""
} |
"""The tests for the device tracker component."""
# pylint: disable=protected-access
import asyncio
import json
import logging
import unittest
from unittest.mock import call, patch
from datetime import datetime, timedelta
import os
from homeassistant.components import zone
from homeassistant.core import callback
from homeassistant.bootstrap import setup_component
from homeassistant.helpers import discovery
from homeassistant.loader import get_component
from homeassistant.util.async import run_coroutine_threadsafe
import homeassistant.util.dt as dt_util
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_ENTITY_PICTURE, ATTR_FRIENDLY_NAME, ATTR_HIDDEN,
STATE_HOME, STATE_NOT_HOME, CONF_PLATFORM)
import homeassistant.components.device_tracker as device_tracker
from homeassistant.exceptions import HomeAssistantError
from homeassistant.remote import JSONEncoder
from tests.common import (
get_test_home_assistant, fire_time_changed, fire_service_discovered,
patch_yaml_files, assert_setup_component)
from ...test_util.aiohttp import mock_aiohttp_client
TEST_PLATFORM = {device_tracker.DOMAIN: {CONF_PLATFORM: 'test'}}
_LOGGER = logging.getLogger(__name__)
class TestComponentsDeviceTracker(unittest.TestCase):
"""Test the Device tracker."""
hass = None # HomeAssistant
yaml_devices = None # type: str
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.yaml_devices = self.hass.config.path(device_tracker.YAML_DEVICES)
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
if os.path.isfile(self.yaml_devices):
os.remove(self.yaml_devices)
self.hass.stop()
def test_is_on(self):
"""Test is_on method."""
entity_id = device_tracker.ENTITY_ID_FORMAT.format('test')
self.hass.states.set(entity_id, STATE_HOME)
self.assertTrue(device_tracker.is_on(self.hass, entity_id))
self.hass.states.set(entity_id, STATE_NOT_HOME)
self.assertFalse(device_tracker.is_on(self.hass, entity_id))
# pylint: disable=no-self-use
def test_reading_broken_yaml_config(self):
"""Test when known devices contains invalid data."""
files = {'empty.yaml': '',
'nodict.yaml': '100',
'badkey.yaml': '@:\n name: Device',
'noname.yaml': 'my_device:\n',
'allok.yaml': 'My Device:\n name: Device',
'oneok.yaml': ('My Device!:\n name: Device\n'
'bad_device:\n nme: Device')}
args = {'hass': self.hass, 'consider_home': timedelta(seconds=60)}
with patch_yaml_files(files):
assert device_tracker.load_config('empty.yaml', **args) == []
assert device_tracker.load_config('nodict.yaml', **args) == []
assert device_tracker.load_config('noname.yaml', **args) == []
assert device_tracker.load_config('badkey.yaml', **args) == []
res = device_tracker.load_config('allok.yaml', **args)
assert len(res) == 1
assert res[0].name == 'Device'
assert res[0].dev_id == 'my_device'
res = device_tracker.load_config('oneok.yaml', **args)
assert len(res) == 1
assert res[0].name == 'Device'
assert res[0].dev_id == 'my_device'
def test_reading_yaml_config(self):
"""Test the rendering of the YAML configuration."""
dev_id = 'test'
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, dev_id,
'AB:CD:EF:GH:IJ', 'Test name', picture='http://test.picture',
hide_if_away=True)
device_tracker.update_config(self.yaml_devices, dev_id, device)
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN,
TEST_PLATFORM)
config = device_tracker.load_config(self.yaml_devices, self.hass,
device.consider_home)[0]
self.assertEqual(device.dev_id, config.dev_id)
self.assertEqual(device.track, config.track)
self.assertEqual(device.mac, config.mac)
self.assertEqual(device.config_picture, config.config_picture)
self.assertEqual(device.away_hide, config.away_hide)
self.assertEqual(device.consider_home, config.consider_home)
self.assertEqual(device.vendor, config.vendor)
# pylint: disable=invalid-name
@patch('homeassistant.components.device_tracker._LOGGER.warning')
def test_track_with_duplicate_mac_dev_id(self, mock_warning):
"""Test adding duplicate MACs or device IDs to DeviceTracker."""
devices = [
device_tracker.Device(self.hass, True, True, 'my_device', 'AB:01',
'My device', None, None, False),
device_tracker.Device(self.hass, True, True, 'your_device',
'AB:01', 'Your device', None, None, False)]
device_tracker.DeviceTracker(self.hass, False, True, devices)
_LOGGER.debug(mock_warning.call_args_list)
assert mock_warning.call_count == 1, \
"The only warning call should be duplicates (check DEBUG)"
args, _ = mock_warning.call_args
assert 'Duplicate device MAC' in args[0], \
'Duplicate MAC warning expected'
mock_warning.reset_mock()
devices = [
device_tracker.Device(self.hass, True, True, 'my_device',
'AB:01', 'My device', None, None, False),
device_tracker.Device(self.hass, True, True, 'my_device',
None, 'Your device', None, None, False)]
device_tracker.DeviceTracker(self.hass, False, True, devices)
_LOGGER.debug(mock_warning.call_args_list)
assert mock_warning.call_count == 1, \
"The only warning call should be duplicates (check DEBUG)"
args, _ = mock_warning.call_args
assert 'Duplicate device IDs' in args[0], \
'Duplicate device IDs warning expected'
def test_setup_without_yaml_file(self):
"""Test with no YAML file."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN,
TEST_PLATFORM)
# pylint: disable=invalid-name
def test_adding_unknown_device_to_config(self):
"""Test the adding of unknown devices to configuration file."""
scanner = get_component('device_tracker.test').SCANNER
scanner.reset()
scanner.come_home('DEV1')
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {CONF_PLATFORM: 'test'}})
# wait for async calls (macvendor) to finish
self.hass.block_till_done()
config = device_tracker.load_config(self.yaml_devices, self.hass,
timedelta(seconds=0))
assert len(config) == 1
assert config[0].dev_id == 'dev1'
assert config[0].track
def test_gravatar(self):
"""Test the Gravatar generation."""
dev_id = 'test'
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, dev_id,
'AB:CD:EF:GH:IJ', 'Test name', gravatar='[email protected]')
gravatar_url = ("https://www.gravatar.com/avatar/"
"55502f40dc8b7c769880b10874abc9d0.jpg?s=80&d=wavatar")
self.assertEqual(device.config_picture, gravatar_url)
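    # The hash embedded in gravatar_url above is the hex MD5 digest that
    # Gravatar keys avatars on, so the expected value should be reproducible
    # by hand (assuming the component hashes the plain address):
    #
    #     hashlib.md5(b'[email protected]').hexdigest()
    #     # -> '55502f40dc8b7c769880b10874abc9d0'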
def test_gravatar_and_picture(self):
"""Test that Gravatar overrides picture."""
dev_id = 'test'
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, dev_id,
'AB:CD:EF:GH:IJ', 'Test name', picture='http://test.picture',
gravatar='[email protected]')
gravatar_url = ("https://www.gravatar.com/avatar/"
"55502f40dc8b7c769880b10874abc9d0.jpg?s=80&d=wavatar")
self.assertEqual(device.config_picture, gravatar_url)
def test_mac_vendor_lookup(self):
"""Test if vendor string is lookup on macvendors API."""
mac = 'B8:27:EB:00:00:00'
vendor_string = 'Raspberry Pi Foundation'
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, 'test', mac, 'Test name')
with mock_aiohttp_client() as aioclient_mock:
aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
text=vendor_string)
run_coroutine_threadsafe(device.set_vendor_for_mac(),
self.hass.loop).result()
assert aioclient_mock.call_count == 1
self.assertEqual(device.vendor, vendor_string)
def test_mac_vendor_mac_formats(self):
"""Verify all variations of MAC addresses are handled correctly."""
vendor_string = 'Raspberry Pi Foundation'
with mock_aiohttp_client() as aioclient_mock:
aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
text=vendor_string)
aioclient_mock.get('http://api.macvendors.com/00:27:eb',
text=vendor_string)
mac = 'B8:27:EB:00:00:00'
device = device_tracker.Device(
self.hass, timedelta(seconds=180),
True, 'test', mac, 'Test name')
run_coroutine_threadsafe(device.set_vendor_for_mac(),
self.hass.loop).result()
self.assertEqual(device.vendor, vendor_string)
mac = '0:27:EB:00:00:00'
device = device_tracker.Device(
self.hass, timedelta(seconds=180),
True, 'test', mac, 'Test name')
run_coroutine_threadsafe(device.set_vendor_for_mac(),
self.hass.loop).result()
self.assertEqual(device.vendor, vendor_string)
mac = 'PREFIXED_B8:27:EB:00:00:00'
device = device_tracker.Device(
self.hass, timedelta(seconds=180),
True, 'test', mac, 'Test name')
run_coroutine_threadsafe(device.set_vendor_for_mac(),
self.hass.loop).result()
self.assertEqual(device.vendor, vendor_string)
def test_mac_vendor_lookup_unknown(self):
"""Prevent another mac vendor lookup if was not found first time."""
mac = 'B8:27:EB:00:00:00'
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, 'test', mac, 'Test name')
with mock_aiohttp_client() as aioclient_mock:
aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
status=404)
run_coroutine_threadsafe(device.set_vendor_for_mac(),
self.hass.loop).result()
self.assertEqual(device.vendor, 'unknown')
def test_mac_vendor_lookup_error(self):
"""Prevent another lookup if failure during API call."""
mac = 'B8:27:EB:00:00:00'
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, 'test', mac, 'Test name')
with mock_aiohttp_client() as aioclient_mock:
aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
status=500)
run_coroutine_threadsafe(device.set_vendor_for_mac(),
self.hass.loop).result()
self.assertEqual(device.vendor, 'unknown')
def test_mac_vendor_lookup_exception(self):
"""Prevent another lookup if exception during API call."""
mac = 'B8:27:EB:00:00:00'
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, 'test', mac, 'Test name')
with mock_aiohttp_client() as aioclient_mock:
aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
exc=asyncio.TimeoutError())
run_coroutine_threadsafe(device.set_vendor_for_mac(),
self.hass.loop).result()
self.assertEqual(device.vendor, 'unknown')
def test_mac_vendor_lookup_on_see(self):
"""Test if macvendor is looked up when device is seen."""
mac = 'B8:27:EB:00:00:00'
vendor_string = 'Raspberry Pi Foundation'
tracker = device_tracker.DeviceTracker(
self.hass, timedelta(seconds=60), 0, [])
with mock_aiohttp_client() as aioclient_mock:
aioclient_mock.get('http://api.macvendors.com/b8:27:eb',
text=vendor_string)
run_coroutine_threadsafe(
tracker.async_see(mac=mac), self.hass.loop).result()
assert aioclient_mock.call_count == 1, \
'No http request for macvendor made!'
self.assertEqual(tracker.devices['b827eb000000'].vendor, vendor_string)
def test_discovery(self):
"""Test discovery."""
scanner = get_component('device_tracker.test').SCANNER
with patch.dict(device_tracker.DISCOVERY_PLATFORMS, {'test': 'test'}):
with patch.object(scanner, 'scan_devices',
autospec=True) as mock_scan:
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(
self.hass, device_tracker.DOMAIN, TEST_PLATFORM)
fire_service_discovered(self.hass, 'test', {})
self.assertTrue(mock_scan.called)
@patch(
'homeassistant.components.device_tracker.DeviceTracker.see')
@patch(
'homeassistant.components.device_tracker.demo.setup_scanner',
autospec=True)
def test_discover_platform(self, mock_demo_setup_scanner, mock_see):
"""Test discovery of device_tracker demo platform."""
assert device_tracker.DOMAIN not in self.hass.config.components
discovery.load_platform(
self.hass, device_tracker.DOMAIN, 'demo', {'test_key': 'test_val'},
{})
self.hass.block_till_done()
assert device_tracker.DOMAIN in self.hass.config.components
assert mock_demo_setup_scanner.called
assert mock_demo_setup_scanner.call_args[0] == (
self.hass, {}, mock_see, {'test_key': 'test_val'})
def test_update_stale(self):
"""Test stalled update."""
scanner = get_component('device_tracker.test').SCANNER
scanner.reset()
scanner.come_home('DEV1')
register_time = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
scan_time = datetime(2015, 9, 15, 23, 1, tzinfo=dt_util.UTC)
with patch('homeassistant.components.device_tracker.dt_util.utcnow',
return_value=register_time):
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'test',
device_tracker.CONF_CONSIDER_HOME: 59,
}})
self.assertEqual(STATE_HOME,
self.hass.states.get('device_tracker.dev1').state)
scanner.leave_home('DEV1')
with patch('homeassistant.components.device_tracker.dt_util.utcnow',
return_value=scan_time):
fire_time_changed(self.hass, scan_time)
self.hass.block_till_done()
self.assertEqual(STATE_NOT_HOME,
self.hass.states.get('device_tracker.dev1').state)
def test_entity_attributes(self):
"""Test the entity attributes."""
dev_id = 'test_entity'
entity_id = device_tracker.ENTITY_ID_FORMAT.format(dev_id)
friendly_name = 'Paulus'
picture = 'http://placehold.it/200x200'
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, dev_id, None,
friendly_name, picture, hide_if_away=True)
device_tracker.update_config(self.yaml_devices, dev_id, device)
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN,
TEST_PLATFORM)
attrs = self.hass.states.get(entity_id).attributes
self.assertEqual(friendly_name, attrs.get(ATTR_FRIENDLY_NAME))
self.assertEqual(picture, attrs.get(ATTR_ENTITY_PICTURE))
def test_device_hidden(self):
"""Test hidden devices."""
dev_id = 'test_entity'
entity_id = device_tracker.ENTITY_ID_FORMAT.format(dev_id)
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, dev_id, None,
hide_if_away=True)
device_tracker.update_config(self.yaml_devices, dev_id, device)
scanner = get_component('device_tracker.test').SCANNER
scanner.reset()
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN,
TEST_PLATFORM)
self.assertTrue(self.hass.states.get(entity_id)
.attributes.get(ATTR_HIDDEN))
def test_group_all_devices(self):
"""Test grouping of devices."""
dev_id = 'test_entity'
entity_id = device_tracker.ENTITY_ID_FORMAT.format(dev_id)
device = device_tracker.Device(
self.hass, timedelta(seconds=180), True, dev_id, None,
hide_if_away=True)
device_tracker.update_config(self.yaml_devices, dev_id, device)
scanner = get_component('device_tracker.test').SCANNER
scanner.reset()
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN,
TEST_PLATFORM)
state = self.hass.states.get(device_tracker.ENTITY_ID_ALL_DEVICES)
self.assertIsNotNone(state)
self.assertEqual(STATE_NOT_HOME, state.state)
self.assertSequenceEqual((entity_id,),
state.attributes.get(ATTR_ENTITY_ID))
@patch('homeassistant.components.device_tracker.DeviceTracker.async_see')
def test_see_service(self, mock_see):
"""Test the see service with a unicode dev_id and NO MAC."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN,
TEST_PLATFORM)
params = {
'dev_id': 'some_device',
'host_name': 'example.com',
'location_name': 'Work',
'gps': [.3, .8],
'attributes': {
'test': 'test'
}
}
device_tracker.see(self.hass, **params)
self.hass.block_till_done()
assert mock_see.call_count == 1
self.assertEqual(mock_see.call_count, 1)
self.assertEqual(mock_see.call_args, call(**params))
mock_see.reset_mock()
params['dev_id'] += chr(233) # e' acute accent from icloud
device_tracker.see(self.hass, **params)
self.hass.block_till_done()
assert mock_see.call_count == 1
self.assertEqual(mock_see.call_count, 1)
self.assertEqual(mock_see.call_args, call(**params))
def test_new_device_event_fired(self):
"""Test that the device tracker will fire an event."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN,
TEST_PLATFORM)
test_events = []
@callback
def listener(event):
"""Helper method that will verify our event got called."""
test_events.append(event)
self.hass.bus.listen("device_tracker_new_device", listener)
device_tracker.see(self.hass, 'mac_1', host_name='hello')
device_tracker.see(self.hass, 'mac_1', host_name='hello')
self.hass.block_till_done()
assert len(test_events) == 1
# Assert we can serialize the event
json.dumps(test_events[0].as_dict(), cls=JSONEncoder)
assert test_events[0].data == {
'entity_id': 'device_tracker.hello',
'host_name': 'hello',
}
# pylint: disable=invalid-name
def test_not_write_duplicate_yaml_keys(self):
"""Test that the device tracker will not generate invalid YAML."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN,
TEST_PLATFORM)
device_tracker.see(self.hass, 'mac_1', host_name='hello')
device_tracker.see(self.hass, 'mac_2', host_name='hello')
self.hass.block_till_done()
config = device_tracker.load_config(self.yaml_devices, self.hass,
timedelta(seconds=0))
assert len(config) == 2
# pylint: disable=invalid-name
def test_not_allow_invalid_dev_id(self):
"""Test that the device tracker will not allow invalid dev ids."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN,
TEST_PLATFORM)
device_tracker.see(self.hass, dev_id='hello-world')
config = device_tracker.load_config(self.yaml_devices, self.hass,
timedelta(seconds=0))
assert len(config) == 0
def test_see_state(self):
"""Test device tracker see records state correctly."""
self.assertTrue(setup_component(self.hass, device_tracker.DOMAIN,
TEST_PLATFORM))
params = {
'mac': 'AA:BB:CC:DD:EE:FF',
'dev_id': 'some_device',
'host_name': 'example.com',
'location_name': 'Work',
'gps': [.3, .8],
'gps_accuracy': 1,
'battery': 100,
'attributes': {
'test': 'test',
'number': 1,
},
}
device_tracker.see(self.hass, **params)
self.hass.block_till_done()
config = device_tracker.load_config(self.yaml_devices, self.hass,
timedelta(seconds=0))
assert len(config) == 1
state = self.hass.states.get('device_tracker.examplecom')
attrs = state.attributes
self.assertEqual(state.state, 'Work')
self.assertEqual(state.object_id, 'examplecom')
self.assertEqual(state.name, 'example.com')
self.assertEqual(attrs['friendly_name'], 'example.com')
self.assertEqual(attrs['battery'], 100)
self.assertEqual(attrs['latitude'], 0.3)
self.assertEqual(attrs['longitude'], 0.8)
self.assertEqual(attrs['test'], 'test')
self.assertEqual(attrs['gps_accuracy'], 1)
self.assertEqual(attrs['source_type'], 'gps')
self.assertEqual(attrs['number'], 1)
def test_see_passive_zone_state(self):
"""Test that the device tracker sets gps for passive trackers."""
register_time = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
scan_time = datetime(2015, 9, 15, 23, 1, tzinfo=dt_util.UTC)
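        # Note: the config below sets consider_home to 59 seconds, so a scan
        # one minute after the device was last seen should flip it to not_home.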
with assert_setup_component(1, zone.DOMAIN):
zone_info = {
'name': 'Home',
'latitude': 1,
'longitude': 2,
'radius': 250,
'passive': False
}
setup_component(self.hass, zone.DOMAIN, {
'zone': zone_info
})
scanner = get_component('device_tracker.test').SCANNER
scanner.reset()
scanner.come_home('dev1')
with patch('homeassistant.components.device_tracker.dt_util.utcnow',
return_value=register_time):
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'test',
device_tracker.CONF_CONSIDER_HOME: 59,
}})
state = self.hass.states.get('device_tracker.dev1')
attrs = state.attributes
self.assertEqual(STATE_HOME, state.state)
self.assertEqual(state.object_id, 'dev1')
self.assertEqual(state.name, 'dev1')
self.assertEqual(attrs.get('friendly_name'), 'dev1')
self.assertEqual(attrs.get('latitude'), 1)
self.assertEqual(attrs.get('longitude'), 2)
self.assertEqual(attrs.get('gps_accuracy'), 0)
self.assertEqual(attrs.get('source_type'),
device_tracker.SOURCE_TYPE_ROUTER)
scanner.leave_home('dev1')
with patch('homeassistant.components.device_tracker.dt_util.utcnow',
return_value=scan_time):
fire_time_changed(self.hass, scan_time)
self.hass.block_till_done()
state = self.hass.states.get('device_tracker.dev1')
attrs = state.attributes
self.assertEqual(STATE_NOT_HOME, state.state)
self.assertEqual(state.object_id, 'dev1')
self.assertEqual(state.name, 'dev1')
self.assertEqual(attrs.get('friendly_name'), 'dev1')
self.assertEqual(attrs.get('latitude'), None)
self.assertEqual(attrs.get('longitude'), None)
self.assertEqual(attrs.get('gps_accuracy'), None)
self.assertEqual(attrs.get('source_type'),
device_tracker.SOURCE_TYPE_ROUTER)
@patch('homeassistant.components.device_tracker._LOGGER.warning')
    def test_see_failures(self, mock_warning):
        """Test that device tracker see failures are handled."""
tracker = device_tracker.DeviceTracker(
self.hass, timedelta(seconds=60), 0, [])
# MAC is not a string (but added)
tracker.see(mac=567, host_name="Number MAC")
        # No device id or MAC (not added)
with self.assertRaises(HomeAssistantError):
run_coroutine_threadsafe(
tracker.async_see(), self.hass.loop).result()
assert mock_warning.call_count == 0
# Ignore gps on invalid GPS (both added & warnings)
tracker.see(mac='mac_1_bad_gps', gps=1)
tracker.see(mac='mac_2_bad_gps', gps=[1])
tracker.see(mac='mac_3_bad_gps', gps='gps')
self.hass.block_till_done()
config = device_tracker.load_config(self.yaml_devices, self.hass,
timedelta(seconds=0))
assert mock_warning.call_count == 3
assert len(config) == 4
@patch('homeassistant.components.device_tracker.async_log_exception')
    def test_config_failure(self, mock_ex):
        """Test that an invalid config does not set up the platform."""
with assert_setup_component(0, device_tracker.DOMAIN):
setup_component(self.hass, device_tracker.DOMAIN,
{device_tracker.DOMAIN: {
device_tracker.CONF_CONSIDER_HOME: -1}})
| {
"content_hash": "f10fa50d3353e9c7a08ee3d68d6f1d29",
"timestamp": "",
"source": "github",
"line_count": 658,
"max_line_length": 79,
"avg_line_length": 41.99088145896656,
"alnum_prop": 0.5866087585957293,
"repo_name": "kyvinh/home-assistant",
"id": "4f932cd177fac522b39bb649025ceaa49f8ee934",
"size": "27630",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/device_tracker/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1548645"
},
{
"name": "Python",
"bytes": "5298607"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "14220"
}
],
"symlink_target": ""
} |
from webob import exc
import nova
from nova.api.openstack import common
from nova.api.openstack.compute.views import addresses as views_addresses
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.i18n import _
ALIAS = 'ips'
class IPsController(wsgi.Controller):
"""The servers addresses API controller for the OpenStack API."""
_view_builder_class = views_addresses.ViewBuilderV3
def __init__(self, **kwargs):
super(IPsController, self).__init__(**kwargs)
self._compute_api = nova.compute.API()
def _get_instance(self, context, server_id):
try:
instance = self._compute_api.get(context, server_id)
except nova.exception.NotFound:
msg = _("Instance does not exist")
raise exc.HTTPNotFound(explanation=msg)
return instance
def index(self, req, server_id):
context = req.environ["nova.context"]
instance = self._get_instance(context, server_id)
networks = common.get_networks_for_instance(context, instance)
return self._view_builder.index(networks)
def show(self, req, server_id, id):
context = req.environ["nova.context"]
instance = self._get_instance(context, server_id)
networks = common.get_networks_for_instance(context, instance)
if id not in networks:
msg = _("Instance is not a member of specified network")
raise exc.HTTPNotFound(explanation=msg)
return self._view_builder.show(networks[id], id)
class IPs(extensions.V3APIExtensionBase):
"""Server addresses."""
name = "Ips"
alias = ALIAS
version = 1
def get_resources(self):
parent = {'member_name': 'server',
'collection_name': 'servers'}
resources = [
extensions.ResourceExtension(
ALIAS, IPsController(), parent=parent, member_name='ip')]
return resources
def get_controller_extensions(self):
return []
| {
"content_hash": "d657f3a53c339b5ccc6d71171db7c214",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 73,
"avg_line_length": 31.5,
"alnum_prop": 0.6458333333333334,
"repo_name": "jumpstarter-io/nova",
"id": "cfdbacf44c0c18e92483b221a3588a6abfc487c6",
"size": "2652",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/plugins/v3/ips.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from __future__ import annotations
import unittest
from unittest import mock
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.sagemaker import SageMakerHook
from airflow.providers.amazon.aws.sensors.sagemaker import SageMakerTransformSensor
DESCRIBE_TRANSFORM_INPROGRESS_RESPONSE = {
'TransformJobStatus': 'InProgress',
'ResponseMetadata': {
'HTTPStatusCode': 200,
},
}
DESCRIBE_TRANSFORM_COMPLETED_RESPONSE = {
'TransformJobStatus': 'Completed',
'ResponseMetadata': {
'HTTPStatusCode': 200,
},
}
DESCRIBE_TRANSFORM_FAILED_RESPONSE = {
'TransformJobStatus': 'Failed',
'ResponseMetadata': {
'HTTPStatusCode': 200,
},
'FailureReason': 'Unknown',
}
DESCRIBE_TRANSFORM_STOPPING_RESPONSE = {
'TransformJobStatus': 'Stopping',
'ResponseMetadata': {
'HTTPStatusCode': 200,
},
}
class TestSageMakerTransformSensor(unittest.TestCase):
@mock.patch.object(SageMakerHook, 'get_conn')
@mock.patch.object(SageMakerHook, 'describe_transform_job')
def test_sensor_with_failure(self, mock_describe_job, mock_client):
mock_describe_job.side_effect = [DESCRIBE_TRANSFORM_FAILED_RESPONSE]
sensor = SageMakerTransformSensor(
task_id='test_task', poke_interval=2, aws_conn_id='aws_test', job_name='test_job_name'
)
with pytest.raises(AirflowException):
sensor.execute(None)
mock_describe_job.assert_called_once_with('test_job_name')
@mock.patch.object(SageMakerHook, 'get_conn')
@mock.patch.object(SageMakerHook, '__init__')
@mock.patch.object(SageMakerHook, 'describe_transform_job')
def test_sensor(self, mock_describe_job, hook_init, mock_client):
hook_init.return_value = None
mock_describe_job.side_effect = [
DESCRIBE_TRANSFORM_INPROGRESS_RESPONSE,
DESCRIBE_TRANSFORM_STOPPING_RESPONSE,
DESCRIBE_TRANSFORM_COMPLETED_RESPONSE,
]
sensor = SageMakerTransformSensor(
task_id='test_task', poke_interval=2, aws_conn_id='aws_test', job_name='test_job_name'
)
sensor.execute(None)
        # make sure we called 3 times (terminated when it's completed)
assert mock_describe_job.call_count == 3
# make sure the hook was initialized with the specific params
calls = [mock.call(aws_conn_id='aws_test')]
hook_init.assert_has_calls(calls)
| {
"content_hash": "34bcb422ccc799c06b9190d9d5e9b93f",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 98,
"avg_line_length": 33.9041095890411,
"alnum_prop": 0.6787878787878788,
"repo_name": "cfei18/incubator-airflow",
"id": "860f5c5d43a0bf9f1b22901bab8e6a7ba98b0b7b",
"size": "3262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/providers/amazon/aws/sensors/test_sagemaker_transform.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
} |
"""Create pose regression dataset.
Given a CAD directory structured like:
CAD
Object Class
Object files
Render objects at different rotations and save the image and angle of rotation.
Warning: Overwrites pusher.xml in gym package.
"""
from __future__ import print_function
import contextlib
import logging
import os
import pickle
import random
import tempfile
from absl import app
from absl import flags
import gym
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import stl
from stl import mesh
import tensorflow.compat.v1 as tf
logging.getLogger("stl").setLevel(logging.ERROR)
flags.DEFINE_string("CAD_dir", None,
"Directory of CAD models.")
flags.DEFINE_string("data_dir", None,
"Directory where generated data is stored.")
FLAGS = flags.FLAGS
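# Example invocation (hypothetical paths):
#   python mujoco_render.py --CAD_dir=/path/to/CAD --data_dir=/path/to/output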
def find_mins_maxs(obj):
"""Find the max dimensions.
  So we can know the bounding box, getting the height, width, length
  (because these are the step sizes).
Args:
obj: Mesh object.
Returns:
minx: Extreme dimension.
maxx: Extreme dimension.
miny: Extreme dimension.
maxy: Extreme dimension.
minz: Extreme dimension.
maxz: Extreme dimension.
"""
minx = maxx = miny = maxy = minz = maxz = None
for p in obj.points:
# p contains (x, y, z)
if minx is None:
minx = p[stl.Dimension.X]
maxx = p[stl.Dimension.X]
miny = p[stl.Dimension.Y]
maxy = p[stl.Dimension.Y]
minz = p[stl.Dimension.Z]
maxz = p[stl.Dimension.Z]
else:
maxx = max(p[stl.Dimension.X], maxx)
minx = min(p[stl.Dimension.X], minx)
maxy = max(p[stl.Dimension.Y], maxy)
miny = min(p[stl.Dimension.Y], miny)
maxz = max(p[stl.Dimension.Z], maxz)
minz = min(p[stl.Dimension.Z], minz)
return minx, maxx, miny, maxy, minz, maxz
class MJCModel(object):
"""Mujoco Model."""
def __init__(self, name):
self.name = name
self.root = MJCTreeNode("mujoco").add_attr("model", name)
@contextlib.contextmanager
def asfile(self):
"""Usage information.
Usage:
model = MJCModel('reacher')
with model.asfile() as f:
            print(f.read())  # prints a dump of the model
Yields:
f: File.
"""
with tempfile.NamedTemporaryFile(
mode="w+b", suffix=".xml", delete=True) as f:
self.root.write(f)
f.seek(0)
yield f
def open(self):
self.file = tempfile.NamedTemporaryFile(
mode="w+b", suffix=".xml", delete=True)
self.root.write(self.file)
self.file.seek(0)
return self.file
def save(self, path):
with open(path, "w") as f:
self.root.write(f)
def close(self):
self.file.close()
class MJCModelRegen(MJCModel):
def __init__(self, name, regen_fn):
super(MJCModelRegen, self).__init__(name)
self.regen_fn = regen_fn
def regenerate(self):
self.root = self.regen_fn().root
class MJCTreeNode(object):
"""Mujoco Tree Node."""
def __init__(self, name):
self.name = name
self.attrs = {}
self.children = []
def add_attr(self, key, value):
if isinstance(value, str):
pass
elif isinstance(value, list) or isinstance(value, np.ndarray):
value = " ".join([str(val) for val in value])
self.attrs[key] = value
return self
def __getattr__(self, name):
def wrapper(**kwargs):
newnode = MJCTreeNode(name)
for (k, v) in kwargs.items(): # iteritems in python2
newnode.add_attr(k, v)
self.children.append(newnode)
return newnode
return wrapper
def dfs(self):
yield self
if self.children:
for child in self.children:
for node in child.dfs():
yield node
def write(self, ostream, tabs=0):
"""Write out the object as a string."""
contents = " ".join(['%s="%s"' % (k, v) for (k, v) in self.attrs.items()])
if self.children:
ostream.write("\t" * tabs)
ostream.write("<%s %s>\n" % (self.name, contents))
for child in self.children:
child.write(ostream, tabs=tabs + 1)
ostream.write("\t" * tabs)
ostream.write("</%s>\n" % self.name)
else:
ostream.write("\t" * tabs)
ostream.write("<%s %s/>\n" % (self.name, contents))
def __str__(self):
s = "<" + self.name
s += " ".join(['%s="%s"' % (k, v) for (k, v) in self.attrs.items()])
return s + ">"
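# Note: MJCTreeNode relies on __getattr__, so a call like
# worldbody.geom(name="table", ...) appends a <geom .../> child element;
# pusher() below assembles the whole MJCF model this way.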
def pusher(obj_scale=None,
obj_mass=None,
obj_damping=None,
object_pos=(0.45, -0.05, -0.275),
distr_scale=None,
axisangle=(0, 0, 1, 1.5),
distr_mass=None,
distr_damping=None,
goal_pos=(0.45, -0.05, -0.3230),
distractor_pos=(0.45, -0.05, -0.275),
mesh_file=None,
mesh_file_path=None,
distractor_mesh_file=None,
friction=(.8, .1, .1),
table_texture=None,
distractor_texture=None,
obj_texture=None,
table_pos="0 0.5 -0.325",
table_size="1 1 0.1"):
"""Create the pusher Mujoco object from the mesh file."""
object_pos, goal_pos, distractor_pos, friction = \
list(object_pos), list(goal_pos), list(distractor_pos), list(friction)
# For now, only supports one distractor
# if obj_scale is None:
# #obj_scale = random.uniform(0.5, 1.0)
# obj_scale = 4.0
if obj_mass is None:
obj_mass = random.uniform(0.2, 2.0)
if obj_damping is None:
obj_damping = random.uniform(0.2, 5.0)
obj_damping = str(obj_damping)
if distractor_mesh_file:
if distr_scale is None:
distr_scale = random.uniform(0.5, 1.0)
if distr_mass is None:
distr_mass = random.uniform(0.2, 2.0)
if distr_damping is None:
distr_damping = random.uniform(0.2, 5.0)
distr_damping = str(distr_damping)
mjcmodel = MJCModel("arm3d")
mjcmodel.root.compiler(
inertiafromgeom="true", angle="radian", coordinate="local")
mjcmodel.root.option(
timestep="0.01", gravity="0 0 0", iterations="20", integrator="Euler")
default = mjcmodel.root.default()
default.joint(armature="0.04", damping=1, limited="true")
default.geom(
friction=friction,
density="300",
margin="0.002",
condim="1",
contype="0",
conaffinity="0")
# Make table
worldbody = mjcmodel.root.worldbody()
worldbody.light(diffuse=".5 .5 .5", pos="0 0 5", dir="0 0 -1")
if table_texture:
worldbody.geom(
name="table",
material="table",
type="plane",
pos="0 0.5 -0.325",
size="1 1 0.1",
contype="1",
conaffinity="1")
else:
worldbody.geom(
name="table",
type="plane",
pos=table_pos,
size=table_size,
contype="1",
conaffinity="1")
# Process object physical properties
if mesh_file is not None:
mesh_object = mesh.Mesh.from_file(mesh_file)
vol, cog, inertia = mesh_object.get_mass_properties()
inertia = np.abs(inertia)
vol = np.abs(vol)
cog = np.abs(cog)
minx, maxx, miny, maxy, minz, maxz = find_mins_maxs(mesh_object)
max_length = max((maxx - minx), max((maxy - miny), (maxz - minz)))
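    # Heuristic: the scale below is chosen so the mesh's longest dimension
    # maps to a roughly constant size in the scene (obj_scale * 0.24 units).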
if max_length > 0.5:
obj_scale = 5
else:
obj_scale = 7
scale = obj_scale * 0.0012 * (200.0 / max_length)
# print('max_length=', max_length)
object_density = np.abs(obj_mass / (vol * scale * scale * scale))
object_pos[0] -= scale * (minx + maxx) / 2.0
object_pos[1] -= scale * (miny + maxy) / 2.0
object_pos[2] = -0.324 - scale * minz
object_scale = scale
if distractor_mesh_file is not None:
distr_mesh_object = mesh.Mesh.from_file(distractor_mesh_file)
vol, cog, inertia = distr_mesh_object.get_mass_properties()
inertia = np.abs(inertia)
minx, maxx, miny, maxy, minz, maxz = find_mins_maxs(distr_mesh_object)
max_length = max((maxx - minx), max((maxy - miny), (maxz - minz)))
distr_scale = distr_scale * 0.0012 * (200.0 / max_length)
distr_density = distr_mass / (vol * distr_scale * distr_scale * distr_scale)
distractor_pos[0] -= distr_scale * (minx + maxx) / 2.0
distractor_pos[1] -= distr_scale * (miny + maxy) / 2.0
distractor_pos[2] = -0.324 - distr_scale * minz
## MAKE DISTRACTOR
if distractor_mesh_file:
distractor = worldbody.body(name="distractor", pos=distractor_pos)
if distractor_mesh_file is None:
distractor.geom(
rgba="1 1 1 1",
type="cylinder",
size="0.05 0.05 0.05",
density="0.00001",
contype="1",
conaffinity="0")
else:
if distractor_texture:
distractor.geom(
material="distractor",
conaffinity="0",
contype="1",
density=str(distr_density),
mesh="distractor_mesh",
rgba="1 1 1 1",
type="mesh")
else:
distractor.geom(
conaffinity="0",
contype="1",
density=str(distr_density),
mesh="distractor_mesh",
rgba="1 1 1 1",
type="mesh")
distractor.joint(
name="distractor_slidey",
type="slide",
pos="0 0 0",
axis="0 1 0",
range="-10.3213 10.3",
damping=distr_damping)
distractor.joint(
name="distractor_slidex",
type="slide",
pos="0 0 0",
axis="1 0 0",
range="-10.3213 10.3",
damping=distr_damping)
# MAKE TARGET OBJECT
obj = worldbody.body(name="object", pos=object_pos, axisangle=axisangle)
if mesh_file is None:
obj.geom(
rgba="1 1 1 1",
type="cylinder",
size="0.05 0.05 0.05",
density="0.00001",
contype="1",
conaffinity="0")
else:
if obj_texture:
obj.geom(
material="object",
conaffinity="0",
contype="1",
density=str(object_density),
mesh="object_mesh",
rgba="1 1 1 1",
type="mesh")
else:
obj.geom(
conaffinity="0",
contype="1",
density=str(object_density),
mesh="object_mesh",
rgba="1 1 1 1",
type="mesh")
obj.joint(
name="obj_slidey",
type="slide",
pos="0 0 0",
axis="0 1 0",
range="-10.3213 10.3",
damping=obj_damping)
obj.joint(
name="obj_slidex",
type="slide",
pos="0 0 0",
axis="1 0 0",
range="-10.3213 10.3",
damping=obj_damping)
goal = worldbody.body(name="goal", pos=goal_pos)
goal.geom(
rgba="1 0 0 0",
type="cylinder",
size="0.08 0.001 0.1",
density="0.00001",
contype="0",
conaffinity="0")
goal.joint(
name="goal_slidey",
type="slide",
pos="0 0 0",
axis="0 1 0",
range="-10.3213 10.3",
damping="0.5")
goal.joint(
name="goal_slidex",
type="slide",
pos="0 0 0",
axis="1 0 0",
range="-10.3213 10.3",
damping="0.5")
asset = mjcmodel.root.asset()
if table_texture:
asset.texture(name="table", file=table_texture, type="2d")
asset.material(
shininess="0.3",
specular="1",
name="table",
rgba="0.9 0.9 0.9 1",
texture="table")
asset.mesh(
file=mesh_file_path, name="object_mesh",
scale=[object_scale] * 3) # figure out the proper scale
if obj_texture:
asset.texture(name="object", file=obj_texture)
asset.material(
shininess="0.3",
specular="1",
name="object",
rgba="0.9 0.9 0.9 1",
texture="object")
actuator = mjcmodel.root.actuator()
actuator.motor(joint="goal_slidex", ctrlrange="-2.0 2.0", ctrllimited="true")
tips_arm = worldbody.body(name="tips_arm", pos=goal_pos)
tips_arm.geom(
rgba="1 0 0 0",
type="cylinder",
size="0.08 0.001 0.1",
density="0.00001",
contype="0",
conaffinity="0")
return mjcmodel
def render_images(stl_file, dest_dir):
"""Render images of rotated object."""
(sub_path, obj) = os.path.split(stl_file)
obj = obj[:-4] # Strip extension
sub_name = os.path.split(sub_path)[-1]
des_path = os.path.join(dest_dir, "rotate", sub_name, obj)
if not os.path.exists(des_path):
os.makedirs(des_path)
ylabels = []
for i in range(100):
# np.random.seed(i**2)
# random.seed(i**2)
x_pos = 0.5 # random.uniform(0,1)
y_pos = 0 # random.uniform(-1,1)
angle_pos = random.uniform(0, 2 * np.pi)
model = pusher(
mesh_file=stl_file,
mesh_file_path=stl_file,
object_pos=(x_pos, y_pos, 0.3),
axisangle=[0, 0, 1, angle_pos],
table_pos="0.5 0.2 -0.4",
table_size="1.0 1.6 1")
model.save(os.path.join(gym.__path__[0][:-4],
"gym/envs/mujoco/assets/pusher.xml"))
# print(GYM_PATH + '/gym/envs/mujoco/assets/pusher.xml')
# copy2(args.obj_filepath, GYM_PATH+'/gym/envs/mujoco/assets')
env = gym.envs.make("Pusher-v2")
screen = env.render(mode="rgb_array")
# res = cv2.resize(screen, dsize=(128,128), interpolation=cv2.INTER_AREA)
res = screen
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
ax.axis("off")
ax.imshow(res)
ax.set_position([0, 0, 1, 1])
plt.savefig(des_path + "/" + str(i) + ".png")
plt.close()
ylabel = [i, x_pos, y_pos, angle_pos]
ylabels.append(ylabel)
pickle.dump(
ylabels,
open(des_path + "/" + "label_" + sub_name + obj, "wb"),
protocol=2)
def main(_):
CAD_dir = FLAGS.CAD_dir # pylint: disable=invalid-name
# Skip these because they are symmetric
classes_to_skip = ["bottle", "train"]
for obj_class in tf.io.gfile.listdir(CAD_dir):
if obj_class in classes_to_skip:
continue
for obj in tf.io.gfile.listdir(os.path.join(CAD_dir, obj_class)):
if obj.endswith(".stl"):
print("Rendering %s from object class %s" % (obj, obj_class))
try:
render_images(os.path.join(CAD_dir, obj_class, obj), FLAGS.data_dir)
except Exception: # pylint: disable=broad-except
print("Failed to render %s from object class %s" % (obj, obj_class))
if __name__ == "__main__":
app.run(main)
| {
"content_hash": "bcc94f86dd13d7d99a4d816601d8a37c",
"timestamp": "",
"source": "github",
"line_count": 511,
"max_line_length": 80,
"avg_line_length": 27.886497064579256,
"alnum_prop": 0.5771228070175438,
"repo_name": "google-research/google-research",
"id": "fa722c4ee0e6a41a384c8f626604943838750eb0",
"size": "14858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "meta_learning_without_memorization/pose_data/mujoco_render.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
import json
from . import http
from app import app
from common.db import session
from common import utils
from flask import session as sess
from models.feedback import Feedback
def test_create_feedback():
with app.test_client() as c:
with c.session_transaction() as s:
s["is_login"] = True
s["id"] = 2
rv = http(c, "post", "/feedbacks", dict(content="FUCKYOU"))
assert "OK." in rv.data
assert rv.status_code == 201
feedback = session.query(Feedback).first()
assert feedback.content == "FUCKYOU"
| {
"content_hash": "9ec65486eb74a254a4d9e16c6b07ef2e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 67,
"avg_line_length": 31.77777777777778,
"alnum_prop": 0.6381118881118881,
"repo_name": "livoras/feifanote-server",
"id": "e64d7792386505c9edd417086366907299962e52",
"size": "596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_feedback.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45502"
}
],
"symlink_target": ""
} |
"""d4rl_adroit_hammer dataset."""
from typing import Any
from tensorflow_datasets.d4rl import dataset_builder
import tensorflow_datasets.public_api as tfds
class D4rlAdroitHammer(dataset_builder.D4RLDatasetBuilder):
"""DatasetBuilder for d4rl_adroit_hammer dataset."""
VERSION = tfds.core.Version('1.1.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
'1.1.0': 'Added is_last.',
}
BUILDER_CONFIGS = dataset_builder.ADROIT_BUILDER_CONFIGS
def __init__(self, **kwargs: Any):
config = dataset_builder.DatasetConfig(
name='hammer', obs_len=46, action_len=26, qpos_len=33, qvel_len=33)
super().__init__(ds_config=config, **kwargs)
| {
"content_hash": "ab292ced3cdb9839a8d3911698cf0bcc",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 75,
"avg_line_length": 30.59090909090909,
"alnum_prop": 0.6909361069836553,
"repo_name": "tensorflow/datasets",
"id": "f2162ae668fcea83f5a0ef7cf27d2b6df77b3a17",
"size": "1285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_datasets/d4rl/d4rl_adroit_hammer/d4rl_adroit_hammer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "728"
},
{
"name": "JavaScript",
"bytes": "13369"
},
{
"name": "NewLisp",
"bytes": "13940"
},
{
"name": "Perl",
"bytes": "520"
},
{
"name": "Python",
"bytes": "5398856"
},
{
"name": "Roff",
"bytes": "22095"
},
{
"name": "Ruby",
"bytes": "25669"
},
{
"name": "Shell",
"bytes": "3895"
},
{
"name": "Smalltalk",
"bytes": "20604"
},
{
"name": "TeX",
"bytes": "759"
}
],
"symlink_target": ""
} |
from boolalg import to_cnf, And, Or, Not, Xor, Nand, Nor, Implies, Equivalent, ITE
from inference import satisfiable
| {
"content_hash": "bd40584759c5cc6f9884a7bd09dd6cb4",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 82,
"avg_line_length": 58.5,
"alnum_prop": 0.7692307692307693,
"repo_name": "Cuuuurzel/KiPyCalc",
"id": "98ccdfde4db5e02bd8621406baf6140adbe258c0",
"size": "117",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "sympy_old/logic/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20218182"
},
{
"name": "R",
"bytes": "1879"
},
{
"name": "XSLT",
"bytes": "732404"
}
],
"symlink_target": ""
} |
"""
=====================================
Creating MNE objects from data arrays
=====================================
In this simple example, the creation of MNE objects from
numpy arrays is demonstrated. In the last example case, a
NEO file format is used as a source for the data.
"""
# Author: Jaakko Leppakangas <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import neo
import mne
print(__doc__)
###############################################################################
# Create arbitrary data
sfreq = 1000 # Sampling frequency
times = np.arange(0, 10, 0.001) # Use 10000 samples (10s)
sin = np.sin(times * 10) # Multiplied by 10 for shorter cycles
cos = np.cos(times * 10)
sinX2 = sin * 2
cosX2 = cos * 2
# Numpy array of size 4 X 10000.
data = np.array([sin, cos, sinX2, cosX2])
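# (RawArray below expects data shaped (n_channels, n_times), so the four
# signals become four channels of 10000 samples each.)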
# Definition of channel types and names.
ch_types = ['mag', 'mag', 'grad', 'grad']
ch_names = ['sin', 'cos', 'sinX2', 'cosX2']
###############################################################################
# Create an :class:`info <mne.Info>` object.
# It is also possible to use info from another raw object.
info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
###############################################################################
# Create a dummy :class:`mne.io.RawArray` object
raw = mne.io.RawArray(data, info)
# Scaling of the figure.
# For actual EEG/MEG data different scaling factors should be used.
scalings = {'mag': 2, 'grad': 2}
raw.plot(n_channels=4, scalings=scalings, title='Data from arrays',
show=True, block=True)
# It is also possible to auto-compute scalings
scalings = 'auto' # Could also pass a dictionary with some value == 'auto'
raw.plot(n_channels=4, scalings=scalings, title='Auto-scaled Data from arrays',
show=True, block=True)
###############################################################################
# EpochsArray
event_id = 1 # This is used to identify the events.
# First column is for the sample number.
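# (An MNE events array has three columns: sample index, the previous value of
# the trigger channel, and the event id.)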
events = np.array([[200, 0, event_id],
[1200, 0, event_id],
[2000, 0, event_id]]) # List of three arbitrary events
# Here a data set of 700 ms epochs from 2 channels is
# created from sin and cos data.
# Any data in shape (n_epochs, n_channels, n_times) can be used.
epochs_data = np.array([[sin[:700], cos[:700]],
[sin[1000:1700], cos[1000:1700]],
[sin[1800:2500], cos[1800:2500]]])
ch_names = ['sin', 'cos']
ch_types = ['mag', 'mag']
info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
epochs = mne.EpochsArray(epochs_data, info=info, events=events,
event_id={'arbitrary': 1})
picks = mne.pick_types(info, meg=True, eeg=False, misc=False)
epochs.plot(picks=picks, scalings='auto', show=True, block=True)
###############################################################################
# EvokedArray
nave = len(epochs_data) # Number of averaged epochs
evoked_data = np.mean(epochs_data, axis=0)
evokeds = mne.EvokedArray(evoked_data, info=info, tmin=-0.2,
comment='Arbitrary', nave=nave)
evokeds.plot(picks=picks, show=True, units={'mag': '-'},
titles={'mag': 'sin and cos averaged'}, time_unit='s')
###############################################################################
# Create epochs by windowing the raw data.
# The events are spaced evenly every 1 second.
duration = 1.
# create a fixed size events array
# start=0 and stop=None by default
events = mne.make_fixed_length_events(raw, event_id, duration=duration)
print(events)
# for fixed size events no start time before and after event
tmin = 0.
tmax = 0.99 # inclusive tmax, 1 second epochs
# create :class:`Epochs <mne.Epochs>` object
epochs = mne.Epochs(raw, events=events, event_id=event_id, tmin=tmin,
tmax=tmax, baseline=None, verbose=True)
epochs.plot(scalings='auto', block=True)
###############################################################################
# Create overlapping epochs using :func:`mne.make_fixed_length_events` (50 %
# overlap). This also roughly doubles the amount of events compared to the
# previous event list.
duration = 0.5
events = mne.make_fixed_length_events(raw, event_id, duration=duration)
print(events)
epochs = mne.Epochs(raw, events=events, tmin=tmin, tmax=tmax, baseline=None,
verbose=True)
epochs.plot(scalings='auto', block=True)
###############################################################################
# Extracting data from NEO file
# The example here uses the ExampleIO object for creating fake data.
# For actual data and different file formats, consult the NEO documentation.
reader = neo.io.ExampleIO('fakedata.nof')
bl = reader.read(lazy=False)[0]
# Get data from first (and only) segment
seg = bl.segments[0]
title = seg.file_origin
ch_names = list()
data = list()
for ai, asig in enumerate(seg.analogsignals):
# Since the data does not contain channel names, channel indices are used.
ch_names.append('Neo %02d' % (ai + 1,))
# We need the ravel() here because Neo < 0.5 gave 1D, Neo 0.5 gives
# 2D (but still a single channel).
data.append(asig.rescale('V').magnitude.ravel())
data = np.array(data, float)
sfreq = int(seg.analogsignals[0].sampling_rate.magnitude)
# By default, the channel types are assumed to be 'misc'.
info = mne.create_info(ch_names=ch_names, sfreq=sfreq)
raw = mne.io.RawArray(data, info)
raw.plot(n_channels=4, scalings={'misc': 1}, title='Data from NEO',
show=True, block=True, clipping='clamp')
| {
"content_hash": "5320af6e7861fa22d00392765f46d90c",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 79,
"avg_line_length": 34.457317073170735,
"alnum_prop": 0.5937002300477792,
"repo_name": "mne-tools/mne-tools.github.io",
"id": "c7444b149c29ad9ed1d558efddc9b05408849269",
"size": "5651",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "0.17/_downloads/90a2600dabbc6e1dea5d888cc4fdf13a/plot_objects_from_arrays.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "708696"
},
{
"name": "Dockerfile",
"bytes": "1820"
},
{
"name": "HTML",
"bytes": "1526247783"
},
{
"name": "JavaScript",
"bytes": "1323087"
},
{
"name": "Jupyter Notebook",
"bytes": "24820047"
},
{
"name": "Python",
"bytes": "18575494"
}
],
"symlink_target": ""
} |
"""
A module for helper functions to read SNANA simulations
"""
from __future__ import absolute_import
import numpy as np
import sncosmo
try:
from . import filters
except:
print('This may fail without the throughputs directory')
__all__ = ['SnanaSims']
class SnanaSims(object):
"""
class to hold data from SNANA simulations and methods to manipulate the
data
Attributes
----------
snList : list of `~astropy.table.Table`
each Table contains a light curve of a SN.
"""
def __init__(self, headfile, photfile, snids=None, n=None):
"""
Instantiate class from SNANA simulation output files in fits format
Parameters
----------
snids : integer/string, optional defaults to None
if not None, only SN observations corresponding to SNID snid
are loaded
n : Integer, defaults to None
if not None, only the first n SN light curves are loaded
        .. note:: The column names of the SNANA data files are not reformatted
            for SNCosmo use
"""
self.snList = sncosmo.read_snana_fits(head_file=headfile,
phot_file=photfile,
snids=snids, n=n)
@classmethod
def fromSNANAfileroot(cls, snanafileroot, location='./', snids=None,
n=None):
"""
Class constructor from a root file and a location
Parameters
----------
snanafileroot : string, mandatory
root file name for the SNANA which is the prefix to
'_HEAD.FITS', or '_PHOT.FITS'
location : string, optional defaults to current working directory './'
Relative or absolute path to the directory where the head and phot
files are located
snids : integer/string, optional defaults to None
if not None, only SN observations corresponding to SNID snid
are loaded
n : Integer, defaults to None
if not None, only the first n SN light curves are loaded
"""
headfile = cls.snanadatafile(snanafileroot, filetype='head',
location=location)
photfile = cls.snanadatafile(snanafileroot, filetype='phot',
location=location)
        print(headfile)
        return cls(headfile=headfile, photfile=photfile, snids=snids,
                   n=n)
@staticmethod
def snanadatafile(snanafileroot, filetype='head', location='./'):
'''
obtain the name of the head or phot file of an SNANA simulation
and dataset
Parameters
----------
snanafileroot : string, mandatory
root file name for the SNANA which is the prefix to
'_HEAD.FITS', or '_PHOT.FITS'
filetype : string, optional defaults to 'head'
'head' or 'phot' depending on whether a summary file or a photometry
file is being used.
location : string, optional defaults to current working directory './'
relative or absolute path to the directory in which the file is
located
Returns
-------
string : absolute path to the SNANA file
'''
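        # Example (hypothetical inputs): snanadatafile('LSST_Ia', 'head', '/data')
        # returns '/data/LSST_Ia_HEAD.FITS'.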
import os
desiredfiletype = ['head', 'phot']
filetype = filetype.lower()
if not filetype in desiredfiletype:
raise ValueError(
'filetype should be one of "head" or "phot"', filetype)
location = os.path.abspath(location)
suffix = '_HEAD.FITS'
if filetype.lower() == 'phot':
suffix = '_PHOT.FITS'
fname = snanafileroot + suffix
return os.path.join(location, fname)
@staticmethod
def addbandstoSN(sn, snanaBands, replacement):
'''
add a column called 'band' to the `~astropy.Table.Table` by
applying the map of lsstbands to replacements to the content
of a column called 'FLT'
Parameters
----------
sn: `~astropy.Table.Table` obtained by reading an SNANA light curve
snanaBands: list of strings, mandatory
list of strings representing the filters in sn, which can be found
            by `np.unique(sn['FLT'])`
replacements: list of strings, mandatory
list of strings representing the filters as registered in SNCosmo in
the same order as lsstbands
Returns
-------
`~astropy.Table.Table` with 'FLT' column removed and 'band' column added
'''
from astropy.table import Table
filterarray = np.zeros(len(sn), dtype='S8')
for i, flt in enumerate(snanaBands):
mask = sn['FLT'] == flt
filterarray[mask] = replacement[i]
band = Table.Column(filterarray, name='band', dtype='S8')
sn.add_column(band)
sn.remove_column('FLT')
@staticmethod
def reformat_SNANASN(sn, snanaBands=None, replacements=None):
'''
reformat an SNANA light curve for use with SNCosmo
Parameters
----------
sn: `~astropy.Table.Table`, mandatory
representing SNANA light curve
snanaBands: list of strings, optional defaults to None
list of unique strings in any of the 'FLT' column of SNANA files
replacements: list of strings, optional defaults to None
list of unique strings of the same size as lsstbands, and indexed in the
same order representing the keys in the sncosmo.bandpass registry for the
same filters
Returns
-------
`astropy.Table.Table` of the SNANA light curve reformatted for SNCosmo
'''
from astropy.table import Table
# rename cols to names SNCosmo understands
sn.rename_column("FLUXCAL", 'flux')
sn.rename_column("FLUXCALERR", 'fluxerr')
# Add in SNANA magic ZP and sys
sn["ZP"] = 27.5
sn["ZPSYS"] = 'ab'
if replacements is not None:
SnanaSims.addbandstoSN(sn, snanaBands, replacements)
else:
sn.rename_column('FLT', 'band')
return sn
@staticmethod
def matchSNANAbandnamesinregistry():
"""
Will have to build this along as we go, as I don't know the variety
of naming conventions
"""
bandPassList = ['u', 'g', 'r', 'i', 'z', 'y']
for bandpass in bandPassList:
band = sncosmo.get_bandpass('LSST_' + bandpass)
band.name = bandpass
if bandpass == 'y':
band.name = 'Y'
sncosmo.registry.register(band)
| {
"content_hash": "74e77e2be8ebf92fc15838541692bfe6",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 85,
"avg_line_length": 33.916666666666664,
"alnum_prop": 0.5753721636074577,
"repo_name": "lisaleemcb/sncosmo_lc_analysis",
"id": "32b0ae3090629d2577f6e9f82c4364724dfd547f",
"size": "6942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analyzeSN/snanaSims.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "10958421"
},
{
"name": "Python",
"bytes": "54139"
}
],
"symlink_target": ""
} |
"""This code example creates a new mobile line item. Mobile features needs to be
enabled in your account to use mobile targeting. To determine which line
items exist, run get_all_line_items.py. To determine which orders exist, run
get_all_orders.py. To determine which placements exist, run
get_all_placements.py."""
__author__ = '[email protected] (Jeff Sham)'
from datetime import date
import os
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
line_item_service = client.GetService('LineItemService', version='v201204')
# Set order that the created line item will belong to and add the id of a
# placement containing ad units with a mobile target platform.
order_id = 'INSERT_ORDER_ID_HERE'
targeted_placement_ids = ['INSERT_MOBILE_PLACEMENT_ID_HERE']
# Create the line item.
# Target the line items in the following manner:
# Target the Google device manufacturer (40100) but exclude the Nexus One
# device (604046).
# Target the iPhone 4 device submodel (640003).
line_item = {
'name': 'Mobile line item #%s' % Utils.GetUniqueName(),
'orderId': order_id,
'targetPlatform': 'MOBILE',
'targeting': {
'inventoryTargeting': {
'targetedPlacementIds': targeted_placement_ids
},
'technologyTargeting': {
'deviceManufacturerTargeting': {
'deviceManufacturers': [{'id': '40100'}],
'isTargeted': 'true'
},
'mobileDeviceTargeting': {
'targetedMobileDevices': [],
'excludedMobileDevices': [{'id': '604046'}]
},
'mobileDeviceSubmodelTargeting': {
'targetedMobileDeviceSubmodels': [{'id': '640003'}],
'excludedMobileDeviceSubmodels': []
}
}
},
'creativePlaceholders': [
{
'size': {
'width': '300',
'height': '250'
}
}
],
'startDateTimeType': 'IMMEDIATELY',
'lineItemType': 'STANDARD',
'endDateTime': {
'date': {
'year': str(date.today().year + 1),
'month': '9',
'day': '30'
},
'hour': '0',
'minute': '0',
'second': '0'
},
'costType': 'CPM',
'costPerUnit': {
'currencyCode': 'USD',
'microAmount': '2000000'
},
'creativeRotationType': 'EVEN',
'discountType': 'PERCENTAGE',
'unitsBought': '500000',
'unitType': 'IMPRESSIONS'
}
line_item = line_item_service.CreateLineItem(line_item)[0]
# Display results.
print ('Line item with id \'%s\', belonging to order id \'%s\', and named '
'\'%s\' was created.' % (line_item['id'], line_item['orderId'],
line_item['name']))
| {
"content_hash": "271155efa0c9ce00be8097fd2fe27018",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 80,
"avg_line_length": 32.770833333333336,
"alnum_prop": 0.5940877304513669,
"repo_name": "krux/adspygoogle",
"id": "6141303870c15ac7a59a296819c192fdd960ab0f",
"size": "3764",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfp/v201204/create_mobile_line_item.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Python",
"bytes": "2263332"
}
],
"symlink_target": ""
} |
import webob
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
NS = {
'atom': 'http://www.w3.org/2005/Atom',
'ns': 'http://docs.openstack.org/common/api/v1.0'
}
EXP_LINKS = {
'v2.0': {
'html': 'http://docs.openstack.org/'
},
'v3.0': {
'html': 'http://docs.openstack.org/'
},
}
EXP_VERSIONS = {
"v2.0": {
"id": "v2.0",
"status": "CURRENT",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v3/",
},
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.0']['html'],
},
],
"media-types": [
{
"base": "application/xml",
"type": "application/vnd.openstack.compute+xml;version=2",
},
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2",
}
],
},
"v3.0": {
"id": "v3.0",
"status": "EXPERIMENTAL",
"updated": "2013-07-23T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v3/",
},
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v3.0']['html'],
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=3",
}
],
}
}
class VersionsTest(test.NoDBTestCase):
def test_get_version_list_302(self):
req = webob.Request.blank('/v3')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v3())
self.assertEqual(res.status_int, 302)
redirect_req = webob.Request.blank('/v3/')
self.assertEqual(res.location, redirect_req.url)
def test_get_version_3_detail(self):
req = webob.Request.blank('/v3/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v3())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": EXP_VERSIONS['v3.0']}
self.assertEqual(expected, version)
def test_get_version_3_versions_v3_detail(self):
req = webob.Request.blank('/v3/versions/v3.0')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v3())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": EXP_VERSIONS['v3.0']}
self.assertEqual(expected, version)
def test_get_version_3_versions_v2_detail(self):
req = webob.Request.blank('/v3/versions/v2.0')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v3())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": EXP_VERSIONS['v2.0']}
self.assertEqual(expected, version)
def test_get_version_3_versions_invalid(self):
req = webob.Request.blank('/v3/versions/1234')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v3())
self.assertEqual(res.status_int, 404)
self.assertEqual(res.content_type, "application/json")
def test_get_version_3_detail_content_type(self):
req = webob.Request.blank('/')
req.accept = "application/json;version=3"
res = req.get_response(fakes.wsgi_app_v3())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": EXP_VERSIONS['v3.0']}
self.assertEqual(expected, version)
| {
"content_hash": "e308947b757ab1c89c00a1ce465e1d42",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 75,
"avg_line_length": 31.946564885496183,
"alnum_prop": 0.5388291517323776,
"repo_name": "viggates/nova",
"id": "b3aab297613bbab5d35ff37fa1c07f3c8d656174",
"size": "4830",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "nova/tests/api/openstack/compute/plugins/v3/test_versions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14822788"
},
{
"name": "Shell",
"bytes": "18352"
}
],
"symlink_target": ""
} |
import numpy as np
import math
i_d = 0
i_u = 1
i_pbd = 2
i_pbu = 3
i_b = 4
i_ibt = 5
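# The indices above address the metrics vector passed to train()/predict():
# total cells down/up, per-burst cells down/up, number of bursts, and the
# average inter-burst time (see the comments in train() below).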
class Classifier:
def __init__(self, label):
self.label = label
self.inter_burst_times = np.array([])
self.num_burst = np.array([])
self.per_burst_dn = np.array([])
self.per_burst_up = np.array([])
self.tot_cells_dn = np.array([])
self.tot_cells_up = np.array([])
self.tot_distances = np.array([])
self.per_burst_distances = np.array([])
def euclidianDist(self, dn_0, up_0, dn_1, up_1):
return math.sqrt( (dn_0 - dn_1)**2 + (up_0 - up_1)**2 )
def trainIBT(self, ibt):
self.inter_burst_times = np.append(self.inter_burst_times, ibt)
def trainNumBursts(self, b):
self.num_burst = np.append(self.num_burst, b)
def trainPerBurstDistance(self):
for i in range(len(self.per_burst_dn)-1):
u0 = self.per_burst_up[-1]
d0 = self.per_burst_dn[-1]
u1 = self.per_burst_up[i]
d1 = self.per_burst_dn[i]
d = self.euclidianDist(d0, u0, d1, u1)
self.per_burst_distances = np.append(self.per_burst_distances, d)
def trainPerBurst(self, dn, up):
self.per_burst_dn = np.append(self.per_burst_dn, dn)
self.per_burst_up = np.append(self.per_burst_up, up)
if len(self.per_burst_dn) > 1:
self.trainPerBurstDistance()
def trainTotalDistance(self):
for i in range(len(self.tot_cells_dn)-1):
u0 = self.tot_cells_up[-1]
d0 = self.tot_cells_dn[-1]
u1 = self.tot_cells_up[i]
d1 = self.tot_cells_dn[i]
d = self.euclidianDist(d0, u0, d1, u1)
self.tot_distances = np.append(self.tot_distances, d)
def trainTotalCells(self, dn, up):
self.tot_cells_dn = np.append(self.tot_cells_dn, dn)
self.tot_cells_up = np.append(self.tot_cells_up, up)
if len(self.tot_cells_dn) > 1:
self.trainTotalDistance()
def train(self, metrics):
# Average inter-burst time
self.trainIBT(metrics[i_ibt])
# Number of bursts
self.trainNumBursts(metrics[i_b])
# Number of cells per burst
self.trainPerBurst(metrics[i_pbd], metrics[i_pbu])
# Total number of cells
self.trainTotalCells(metrics[i_d], metrics[i_u])
def meanTotalDistance(self):
return np.mean(self.tot_distances)
def meanPerBurstDistance(self):
return np.mean(self.per_burst_distances)
def IBTVote(self, ibt):
std = np.std(self.inter_burst_times)
mean = np.mean(self.inter_burst_times)
dist = abs(ibt - mean)
perfect_match = dist == 0 and std == 0
limit = 4.0
if perfect_match:
return 4.0
elif dist <= limit:
return 1.0
else:
return -1.0
def numBurstsVote(self, b):
std = np.std(self.num_burst)
mean = np.mean(self.num_burst)
dist = abs(b - mean)
perfect_match = dist <= 0.5 and std <= 0.5
limit = 5
if perfect_match:
return 4.0
elif dist <= limit:
return 1.0
else:
return -1.0
def perBurstRatioVote(self, ratio):
ratios = self.per_burst_dn/self.per_burst_up
mean = np.mean(ratios)
dist = abs(ratio - mean)
limit = mean/3.5
if dist <= limit:
return 1.0
else:
return -1.0
def totalRatioVote(self, ratio):
ratios = self.tot_cells_dn/self.tot_cells_up
mean = np.mean(ratios)
dist = abs(ratio - mean)
limit = mean/1.8
if dist <= limit:
return 1.0
else:
return -1.0
def perBurstDistance(self, metrics):
mean_dn = np.mean(self.per_burst_dn)
mean_up = np.mean(self.per_burst_up)
dn = metrics[i_pbd]
up = metrics[i_pbu]
return self.euclidianDist(dn, up, mean_dn, mean_up)
def totalDistance(self, metrics):
mean_dn = np.mean(self.tot_cells_dn)
mean_up = np.mean(self.tot_cells_up)
dn = metrics[i_d]
up = metrics[i_u]
return self.euclidianDist(dn, up, mean_dn, mean_up)
def predict(self, metrics):
vote = 0.0
# Average inter-burst time
vote += self.IBTVote(metrics[i_ibt])
# Number of bursts
vote += self.numBurstsVote(metrics[i_b])
# Cells per burst ratio
vote += self.perBurstRatioVote(metrics[i_pbd]/float(metrics[i_pbu]))
vote += self.totalRatioVote(metrics[i_d]/metrics[i_u])
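        # A larger vote means the observed metrics agree more closely with this
        # label's training data; the caller presumably picks the top-voted label.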
        return vote
| {
"content_hash": "b34b65c83e883d3b65f92e493facf375",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 70,
"avg_line_length": 25.095541401273884,
"alnum_prop": 0.6593908629441624,
"repo_name": "chhans/tor-automation",
"id": "f104288ca918162fddfe19620d7bc18861b97dc5",
"size": "3940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "12183"
},
{
"name": "Python",
"bytes": "33666"
}
],
"symlink_target": ""
} |
from openturns import *
TESTPREAMBLE()
try:
try:
# Test function operator ()
levelFunction = NumericalMathFunction(
["x1", "x2", "x3", "x4"], ["y1"], ["x1+2*x2-3*x3+4*x4"])
specific = CobylaSpecificParameters()
startingPoint = NumericalPoint(4, 1.0)
myAlgorithm = Cobyla(specific, levelFunction)
myAlgorithm.setStartingPoint(startingPoint)
myAlgorithm.setLevelValue(3.0)
myAlgorithm.setMaximumIterationsNumber(100)
myAlgorithm.setMaximumAbsoluteError(1.0e-10)
myAlgorithm.setMaximumRelativeError(1.0e-10)
myAlgorithm.setMaximumResidualError(1.0e-10)
myAlgorithm.setMaximumConstraintError(1.0e-10)
print "myAlgorithm = ", repr(myAlgorithm)
except:
raise
except:
import sys
print "t_Cobyla_std.py", sys.exc_type, sys.exc_value
| {
"content_hash": "b1114400f8bda14b21b12ce440513aa3",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 68,
"avg_line_length": 32.18518518518518,
"alnum_prop": 0.6570771001150748,
"repo_name": "sofianehaddad/ot-svn",
"id": "c897e6b6c060c23fa0b69059f341f27adc992c27",
"size": "893",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/test/t_Cobyla_std.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6498"
},
{
"name": "C",
"bytes": "455749"
},
{
"name": "C++",
"bytes": "10021345"
},
{
"name": "CMake",
"bytes": "240050"
},
{
"name": "FORTRAN",
"bytes": "299"
},
{
"name": "Makefile",
"bytes": "12372"
},
{
"name": "NSIS",
"bytes": "26263"
},
{
"name": "Python",
"bytes": "1221927"
},
{
"name": "R",
"bytes": "11141"
},
{
"name": "Scilab",
"bytes": "2612"
},
{
"name": "Shell",
"bytes": "20403"
},
{
"name": "TeX",
"bytes": "4250"
},
{
"name": "Visual Basic",
"bytes": "3294"
}
],
"symlink_target": ""
} |
from fs.mountfs import MountFS
from fs.memoryfs import MemoryFS
import unittest
class TestMountFS(unittest.TestCase):
def test_auto_close(self):
"""Test MountFS auto close is working"""
multi_fs = MountFS()
m1 = MemoryFS()
m2 = MemoryFS()
multi_fs.mount('/m1', m1)
multi_fs.mount('/m2', m2)
self.assert_(not m1.closed)
self.assert_(not m2.closed)
multi_fs.close()
self.assert_(m1.closed)
self.assert_(m2.closed)
def test_no_auto_close(self):
"""Test MountFS auto close can be disabled"""
multi_fs = MountFS(auto_close=False)
m1 = MemoryFS()
m2 = MemoryFS()
multi_fs.mount('/m1', m1)
multi_fs.mount('/m2', m2)
self.assert_(not m1.closed)
self.assert_(not m2.closed)
multi_fs.close()
self.assert_(not m1.closed)
self.assert_(not m2.closed)
def test_mountfile(self):
"""Test mounting a file"""
quote = b"""If you wish to make an apple pie from scratch, you must first invent the universe."""
mem_fs = MemoryFS()
mem_fs.makedir('foo')
mem_fs.setcontents('foo/bar.txt', quote)
foo_dir = mem_fs.opendir('foo')
mount_fs = MountFS()
mount_fs.mountfile('bar.txt', foo_dir.open, foo_dir.getinfo)
self.assert_(mount_fs.isdir('/'))
self.assert_(mount_fs.isdir('./'))
self.assert_(mount_fs.isdir(''))
# Check we can see the mounted file in the dir list
self.assertEqual(mount_fs.listdir(), ["bar.txt"])
self.assert_(not mount_fs.exists('nobodyhere.txt'))
self.assert_(mount_fs.exists('bar.txt'))
self.assert_(mount_fs.isfile('bar.txt'))
self.assert_(not mount_fs.isdir('bar.txt'))
# Check open and getinfo callables
self.assertEqual(mount_fs.getcontents('bar.txt'), quote)
self.assertEqual(mount_fs.getsize('bar.txt'), len(quote))
# Check changes are written back
mem_fs.setcontents('foo/bar.txt', 'baz')
self.assertEqual(mount_fs.getcontents('bar.txt'), b'baz')
self.assertEqual(mount_fs.getsize('bar.txt'), len('baz'))
# Check changes are written to the original fs
self.assertEqual(mem_fs.getcontents('foo/bar.txt'), b'baz')
self.assertEqual(mem_fs.getsize('foo/bar.txt'), len('baz'))
# Check unmount
self.assert_(mount_fs.unmount("bar.txt"))
self.assertEqual(mount_fs.listdir(), [])
self.assert_(not mount_fs.exists('bar.txt'))
        # Check unmount a second time is a null op, and returns False
self.assertFalse(mount_fs.unmount("bar.txt"))
| {
"content_hash": "988e69ba9764cf26bc4b078671968cef",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 105,
"avg_line_length": 35.88,
"alnum_prop": 0.5982905982905983,
"repo_name": "alekibango/pyfilesystem",
"id": "b2a3adaa7c766027b77edb92c855f047d71709f2",
"size": "2691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fs/tests/test_mountfs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1052786"
},
{
"name": "Shell",
"bytes": "3083"
}
],
"symlink_target": ""
} |
"""
Program to test that all methods/functions have at least one example
doctest. Also checks if docstrings are imported into Sphinx. For this to
work, the Sphinx docs need to be built first. Use "cd doc; make html" to
build the Sphinx docs.
Usage:
./bin/coverage_doctest.py sympy/core
or
./bin/coverage_doctest.py sympy/core/basic.py
If no arguments are given, all files in sympy/ are checked.
"""
from __future__ import print_function
import os
import sys
import re
import string
import inspect
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from HTMLParser import HTMLParser
# Load color templates, used from sympy/utilities/runtests.py
color_templates = (
("Black", "0;30"),
("Red", "0;31"),
("Green", "0;32"),
("Brown", "0;33"),
("Blue", "0;34"),
("Purple", "0;35"),
("Cyan", "0;36"),
("LightGray", "0;37"),
("DarkGray", "1;30"),
("LightRed", "1;31"),
("LightGreen", "1;32"),
("Yellow", "1;33"),
("LightBlue", "1;34"),
("LightPurple", "1;35"),
("LightCyan", "1;36"),
("White", "1;37"),
)
colors = {}
for name, value in color_templates:
colors[name] = value
c_normal = '\033[0m'
c_color = '\033[%sm'
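# Example: (c_color % colors["Red"]) + "text" + c_normal renders "text" in red
# on ANSI-capable terminals.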
def print_header(name, underline=None, color=None):
print()
if color:
print("%s%s%s" % (c_color % colors[color], name, c_normal))
else:
print(name)
if underline and not color:
print(underline*len(name))
def print_coverage(module_path, c, c_md, c_mdt, c_idt, c_sph, f, f_md, f_mdt,
f_idt, f_sph, score, total_doctests, total_members,
sphinx_score, total_sphinx, verbose=False, no_color=False,
sphinx=True):
""" Prints details (depending on verbose) of a module """
doctest_color = "Brown"
sphinx_color = "DarkGray"
less_100_color = "Red"
less_50_color = "LightRed"
equal_100_color = "Green"
big_header_color = "LightPurple"
small_header_color = "Purple"
if no_color:
score_string = "Doctests: %s%% (%s of %s)" % (score, total_doctests,
total_members)
elif score < 100:
if score < 50:
score_string = "%sDoctests:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[doctest_color], c_normal, c_color % colors[less_50_color], score, total_doctests, total_members, c_normal)
else:
score_string = "%sDoctests:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[doctest_color], c_normal, c_color % colors[less_100_color], score, total_doctests, total_members, c_normal)
else:
score_string = "%sDoctests:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[doctest_color], c_normal, c_color % colors[equal_100_color], score, total_doctests, total_members, c_normal)
if sphinx:
if no_color:
sphinx_score_string = "Sphinx: %s%% (%s of %s)" % (sphinx_score,
total_members - total_sphinx, total_members)
elif sphinx_score < 100:
if sphinx_score < 50:
sphinx_score_string = "%sSphinx:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[sphinx_color], c_normal, c_color %
colors[less_50_color], sphinx_score, total_members - total_sphinx,
total_members, c_normal)
else:
sphinx_score_string = "%sSphinx:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[sphinx_color], c_normal, c_color %
colors[less_100_color], sphinx_score, total_members -
total_sphinx, total_members, c_normal)
else:
sphinx_score_string = "%sSphinx:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[sphinx_color], c_normal, c_color %
colors[equal_100_color], sphinx_score, total_members -
total_sphinx, total_members, c_normal)
if verbose:
print('\n' + '-'*70)
print(module_path)
print('-'*70)
else:
if sphinx:
print("%s: %s %s" % (module_path, score_string, sphinx_score_string))
else:
print("%s: %s" % (module_path, score_string))
if verbose:
print_header('CLASSES', '*', not no_color and big_header_color)
if not c:
print_header('No classes found!')
else:
if c_md:
print_header('Missing docstrings', '-', not no_color and small_header_color)
for md in c_md:
print(' * ' + md)
if c_mdt:
print_header('Missing doctests', '-', not no_color and small_header_color)
for md in c_mdt:
print(' * ' + md)
if c_idt:
# Use "# indirect doctest" in the docstring to
                # suppress this warning.
print_header('Indirect doctests', '-', not no_color and small_header_color)
for md in c_idt:
print(' * ' + md)
                print('\n    Use \"# indirect doctest\" in the docstring to suppress this warning')
if c_sph:
print_header('Not imported into Sphinx', '-', not no_color and small_header_color)
for md in c_sph:
print(' * ' + md)
print_header('FUNCTIONS', '*', not no_color and big_header_color)
if not f:
print_header('No functions found!')
else:
if f_md:
print_header('Missing docstrings', '-', not no_color and small_header_color)
for md in f_md:
print(' * ' + md)
if f_mdt:
print_header('Missing doctests', '-', not no_color and small_header_color)
for md in f_mdt:
print(' * ' + md)
if f_idt:
print_header('Indirect doctests', '-', not no_color and small_header_color)
for md in f_idt:
print(' * ' + md)
                print('\n    Use \"# indirect doctest\" in the docstring to suppress this warning')
if f_sph:
print_header('Not imported into Sphinx', '-', not no_color and small_header_color)
for md in f_sph:
print(' * ' + md)
if verbose:
print('\n' + '-'*70)
print(score_string)
if sphinx:
print(sphinx_score_string)
print('-'*70)
def _is_indirect(member, doc):
""" Given string repr of doc and member checks if the member
contains indirect documentation """
d = member in doc
e = 'indirect doctest' in doc
if not d and not e:
return True
else:
return False
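# Editor's note (added, not in the original script): the check above is purely
# textual -- it reports a doctest as "indirect" only when the docstring
# mentions neither the member's own name nor the phrase 'indirect doctest',
# which is what later feeds the "Indirect doctests" sections of the report.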
def _get_arg_list(name, fobj):
""" Given a function object, constructs a list of arguments
and their defaults. Takes care of varargs and kwargs """
trunc = 20 # Sometimes argument length can be huge
argspec = inspect.getargspec(fobj)
arg_list = []
if argspec.args:
for arg in argspec.args:
arg_list.append(str(arg))
arg_list.reverse()
    # Now add the defaults (arg_list is reversed, so the trailing arguments
    # that take the defaults sit at the front of the list)
    if argspec.defaults:
        for i in range(len(argspec.defaults)):
            arg_list[i] = str(arg_list[i]) + '=' + str(argspec.defaults[-1 - i])
# Get the list in right order
arg_list.reverse()
# Add var args
if argspec.varargs:
arg_list.append(argspec.varargs)
if argspec.keywords:
arg_list.append(argspec.keywords)
# Truncate long arguments
arg_list = map(lambda x: x[:trunc], arg_list)
# Construct the parameter string (enclosed in brackets)
str_param = "%s(%s)" % (name, ', '.join(arg_list))
return str_param
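# Illustrative sketch (the sample function below is an assumption, not part of
# this script): for
#
#     def growth(rate, base=2, *samples, **opts): ...
#
# _get_arg_list('growth', growth) reverses the positional args, attaches the
# single default, restores the order and appends varargs/kwargs, producing
# roughly "growth(rate, base=2, samples, opts)".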
def get_mod_name(path, base):
""" Gets a module name, given the path of file/dir and base
dir of sympy """
rel_path = os.path.relpath(path, base)
# Remove the file extension
rel_path, ign = os.path.splitext(rel_path)
# Replace separators by . for module path
file_module = ""
h, t = os.path.split(rel_path)
while h or t:
if t:
file_module = t + '.' + file_module
h, t = os.path.split(h)
return file_module[:-1]
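# Illustrative sketch (paths are assumptions): with base '/repo' and path
# '/repo/sympy/core/basic.py', get_mod_name() walks the relative path back up
# and returns 'sympy.core.basic'.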
class FindInSphinx(HTMLParser):
is_imported = []
def handle_starttag(self, tag, attr):
a = dict(attr)
if tag == "div" and a.get('class', None) == "viewcode-block":
self.is_imported.append(a['id'])
def find_sphinx(name, mod_path, found={}):
if mod_path in found: # Cache results
return name in found[mod_path]
doc_path = mod_path.split('.')
doc_path[-1] += '.html'
sphinx_path = os.path.join(sympy_top, 'doc', '_build', 'html', '_modules', *doc_path)
if not os.path.exists(sphinx_path):
return False
with open(sphinx_path) as f:
html_txt = f.read()
p = FindInSphinx()
p.feed(html_txt)
found[mod_path] = p.is_imported
return name in p.is_imported
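# Editor's sketch of the markup being matched (based on sphinx.ext.viewcode
# output; stated as an assumption, not taken from this script): the generated
# _modules/*.html pages wrap each documented object in a block such as
#
#     <div class="viewcode-block" id="Basic.atoms"> ... </div>
#
# FindInSphinx collects those id attributes and find_sphinx() then checks the
# requested name against that list.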
def process_function(name, c_name, b_obj, mod_path, f_sk, f_md, f_mdt, f_idt,
f_has_doctest, sk_list, sph, sphinx=True):
"""
Processes a function to get information regarding documentation.
    It is assumed that the function calling this subroutine has already
verified that it is a valid module function.
"""
if name in sk_list:
return False, False
    # We add the line number at the end, as inspect.getsourcelines is slow
add_md = False
add_mdt = False
add_idt = False
in_sphinx = True
f_doctest = False
function = False
if inspect.isclass(b_obj):
obj = getattr(b_obj, name)
obj_name = c_name + '.' + name
else:
obj = b_obj
obj_name = name
full_name = _get_arg_list(name, obj)
if name.startswith('_'):
f_sk.append(full_name)
else:
if not obj.__doc__:
add_md = True
elif not '>>>' in obj.__doc__:
add_mdt = True
elif _is_indirect(name, obj.__doc__):
add_idt = True
else:
f_doctest = True
function = True
if sphinx:
in_sphinx = find_sphinx(obj_name, mod_path)
if add_md or add_mdt or add_idt or not in_sphinx:
try:
line_no = inspect.getsourcelines(obj)[1]
except IOError:
# Raised when source does not exist
# which means the function is not there.
return False, False
full_name = "LINE %d: %s" % (line_no, full_name)
if add_md:
f_md.append(full_name)
elif add_mdt:
f_mdt.append(full_name)
elif add_idt:
f_idt.append(full_name)
if not in_sphinx:
sph.append(full_name)
return f_doctest, function
def process_class(c_name, obj, c_sk, c_md, c_mdt, c_idt, c_has_doctest,
mod_path, sph, sphinx=True):
"""
Extracts information about the class regarding documentation.
It is assumed that the function calling this subroutine has already
checked that the class is valid.
"""
# Skip class case
if c_name.startswith('_'):
c_sk.append(c_name)
return False, False, None
c = False
c_dt = False
# Get the line number of class
try:
source, line_no = inspect.getsourcelines(obj)
except IOError:
# Raised when source does not exist
# which means the class is not there.
return False, False, None
c = True
full_name = "LINE %d: %s" % (line_no, c_name)
if not obj.__doc__:
c_md.append(full_name)
elif not '>>>' in obj.__doc__:
c_mdt.append(full_name)
elif _is_indirect(c_name, obj.__doc__):
c_idt.append(full_name)
else:
c_dt = True
c_has_doctest.append(full_name)
in_sphinx = False
if sphinx:
in_sphinx = find_sphinx(c_name, mod_path)
if not in_sphinx:
sph.append(full_name)
return c_dt, c, source
def coverage(module_path, verbose=False, no_color=False, sphinx=True):
""" Given a module path, builds an index of all classes and functions
contained. It then goes through each of the classes/functions to get
the docstring and doctest coverage of the module. """
    # Import the package and find members
m = None
try:
__import__(module_path)
m = sys.modules[module_path]
except Exception as a:
# Most likely cause, absence of __init__
print("%s could not be loaded due to %s." % (module_path, repr(a)))
return 0, 0, 0
c_skipped = []
c_md = []
c_mdt = []
c_has_doctest = []
c_idt = []
classes = 0
c_doctests = 0
c_sph = []
f_skipped = []
f_md = []
f_mdt = []
f_has_doctest = []
f_idt = []
functions = 0
f_doctests = 0
f_sph = []
skip_members = ['__abstractmethods__']
# Get the list of members
m_members = dir(m)
for member in m_members:
# Check for skipped functions first, they throw nasty errors
# when combined with getattr
if member in skip_members:
continue
        # Identify whether the member (class/def) is a part of this module
obj = getattr(m, member)
obj_mod = inspect.getmodule(obj)
# Function not a part of this module
if not obj_mod or not obj_mod.__name__ == module_path:
continue
# If it's a function
if inspect.isfunction(obj) or inspect.ismethod(obj):
f_dt, f = process_function(member, '', obj, module_path,
f_skipped, f_md, f_mdt, f_idt, f_has_doctest, skip_members,
f_sph, sphinx=sphinx)
if f:
functions += 1
if f_dt:
f_doctests += 1
        # If it's a class, look at its methods too
elif inspect.isclass(obj):
# Process the class first
c_dt, c, source = process_class(member, obj, c_skipped, c_md,
c_mdt, c_idt, c_has_doctest, module_path, c_sph, sphinx=sphinx)
if not c:
continue
else:
classes += 1
if c_dt:
c_doctests += 1
            # Iterate through its members
for f_name in obj.__dict__:
if f_name in skip_members or f_name.startswith('_'):
continue
# Check if def funcname appears in source
if not ("def " + f_name) in ' '.join(source):
continue
# Identify the module of the current class member
f_obj = getattr(obj, f_name)
obj_mod = inspect.getmodule(f_obj)
# Function not a part of this module
if not obj_mod or not obj_mod.__name__ == module_path:
continue
# If it's a function
if inspect.isfunction(f_obj) or inspect.ismethod(f_obj):
f_dt, f = process_function(f_name, member, obj,
module_path, f_skipped, f_md, f_mdt, f_idt, f_has_doctest,
skip_members, f_sph, sphinx=sphinx)
if f:
functions += 1
if f_dt:
f_doctests += 1
# Evaluate the percent coverage
total_doctests = c_doctests + f_doctests
total_members = classes + functions
if total_members:
score = 100 * float(total_doctests) / (total_members)
else:
score = 100
score = int(score)
if sphinx:
total_sphinx = len(c_sph) + len(f_sph)
if total_members:
sphinx_score = 100 - 100 * float(total_sphinx) / total_members
else:
sphinx_score = 100
sphinx_score = int(sphinx_score)
else:
total_sphinx = 0
sphinx_score = 0
# Sort functions/classes by line number
c_md = sorted(c_md, key=lambda x: int(x.split()[1][:-1]))
c_mdt = sorted(c_mdt, key=lambda x: int(x.split()[1][:-1]))
c_idt = sorted(c_idt, key=lambda x: int(x.split()[1][:-1]))
f_md = sorted(f_md, key=lambda x: int(x.split()[1][:-1]))
f_mdt = sorted(f_mdt, key=lambda x: int(x.split()[1][:-1]))
f_idt = sorted(f_idt, key=lambda x: int(x.split()[1][:-1]))
print_coverage(module_path, classes, c_md, c_mdt, c_idt, c_sph, functions, f_md,
f_mdt, f_idt, f_sph, score, total_doctests, total_members,
sphinx_score, total_sphinx, verbose=verbose,
no_color=no_color, sphinx=sphinx)
return total_doctests, total_sphinx, total_members
def go(sympy_top, file, verbose=False, no_color=False, exact=True, sphinx=True):
if os.path.isdir(file):
doctests, total_sphinx, num_functions = 0, 0, 0
for F in os.listdir(file):
_doctests, _total_sphinx, _num_functions = go(sympy_top, '%s/%s' % (file, F),
verbose=verbose, no_color=no_color, exact=False, sphinx=sphinx)
doctests += _doctests
total_sphinx += _total_sphinx
num_functions += _num_functions
return doctests, total_sphinx, num_functions
if (not (file.endswith('.py') or file.endswith('.pyx')) or
file.endswith('__init__.py') or
not exact and ('test_' in file or 'bench_' in file or
any(name in file for name in skip_paths))):
return 0, 0, 0
if not os.path.exists(file):
print("File(%s does not exist." % file)
sys.exit(1)
# Relpath for constructing the module name
return coverage(get_mod_name(file, sympy_top), verbose=verbose,
no_color=no_color, sphinx=sphinx)
if __name__ == "__main__":
bintest_dir = os.path.abspath(os.path.dirname(__file__)) # bin/cover...
sympy_top = os.path.split(bintest_dir)[0] # ../
sympy_dir = os.path.join(sympy_top, 'sympy') # ../sympy/
if os.path.isdir(sympy_dir):
sys.path.insert(0, sympy_top)
skip_paths = ['mpmath']
usage = "usage: ./bin/doctest_coverage.py PATHS"
parser = ArgumentParser(
description=__doc__,
usage=usage,
formatter_class=RawDescriptionHelpFormatter,
)
parser.add_argument("path", nargs='*', default=[os.path.join(sympy_top, 'sympy')])
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
default=False)
parser.add_argument("--no-colors", action="store_true", dest="no_color",
help="use no colors", default=False)
parser.add_argument("--no-sphinx", action="store_false", dest="sphinx",
help="don't report Sphinx coverage", default=True)
args = parser.parse_args()
if args.sphinx and not os.path.exists(os.path.join(sympy_top, 'doc', '_build', 'html')):
print("""
Cannot check Sphinx coverage without a documentation build. To build the
docs, run "cd doc; make html". To skip checking Sphinx coverage, pass --no-sphinx.
""")
sys.exit(1)
full_coverage = True
for file in args.path:
file = os.path.normpath(file)
print('DOCTEST COVERAGE for %s' % (file))
print('='*70)
print()
doctests, total_sphinx, num_functions = go(sympy_top, file, verbose=args.verbose,
no_color=args.no_color, sphinx=args.sphinx)
if num_functions == 0:
score = 100
sphinx_score = 100
else:
score = 100 * float(doctests) / num_functions
score = int(score)
if doctests < num_functions:
full_coverage = False
if args.sphinx:
sphinx_score = 100 - 100 * float(total_sphinx) / num_functions
sphinx_score = int(sphinx_score)
if total_sphinx > 0:
full_coverage = False
print()
print('='*70)
if args.no_color:
print("TOTAL DOCTEST SCORE for %s: %s%% (%s of %s)" % \
(get_mod_name(file, sympy_top), score, doctests, num_functions))
elif score < 100:
print("TOTAL DOCTEST SCORE for %s: %s%s%% (%s of %s)%s" % \
(get_mod_name(file, sympy_top), c_color % (colors["Red"]),
score, doctests, num_functions, c_normal))
else:
print("TOTAL DOCTEST SCORE for %s: %s%s%% (%s of %s)%s" % \
(get_mod_name(file, sympy_top), c_color % (colors["Green"]),
score, doctests, num_functions, c_normal))
if args.sphinx:
if args.no_color:
print("TOTAL SPHINX SCORE for %s: %s%% (%s of %s)" % \
(get_mod_name(file, sympy_top), sphinx_score,
num_functions - total_sphinx, num_functions))
elif sphinx_score < 100:
print("TOTAL SPHINX SCORE for %s: %s%s%% (%s of %s)%s" % \
(get_mod_name(file, sympy_top), c_color % (colors["Red"]),
sphinx_score, num_functions - total_sphinx, num_functions, c_normal))
else:
print("TOTAL SPHINX SCORE for %s: %s%s%% (%s of %s)%s" % \
(get_mod_name(file, sympy_top), c_color % (colors["Green"]),
sphinx_score, num_functions - total_sphinx, num_functions, c_normal))
print()
sys.exit(not full_coverage)
| {
"content_hash": "9321cef804a530b776f27e245ffeb982",
"timestamp": "",
"source": "github",
"line_count": 648,
"max_line_length": 141,
"avg_line_length": 33.02160493827161,
"alnum_prop": 0.5467333395644453,
"repo_name": "hrashk/sympy",
"id": "944157b1a35a203d85ccccca4c62477542302182",
"size": "21421",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bin/coverage_doctest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13971941"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "1300"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import sys
import tempfile
from target_test import FileSystemTargetTestMixin
from helpers import with_config, unittest
from boto.exception import S3ResponseError
from boto.s3 import key
from moto import mock_s3
from luigi import configuration
from luigi.s3 import FileNotFoundException, InvalidDeleteException, S3Client, S3Target
if (3, 4, 0) <= sys.version_info[:3] < (3, 4, 3):
# spulec/moto#308
raise unittest.SkipTest('moto mock doesn\'t work with python3.4')
AWS_ACCESS_KEY = "XXXXXXXXXXXXXXXXXXXX"
AWS_SECRET_KEY = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
class TestS3Target(unittest.TestCase, FileSystemTargetTestMixin):
def setUp(self):
f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
self.tempFileContents = (
b"I'm a temporary file for testing\nAnd this is the second line\n"
b"This is the third.")
self.tempFilePath = f.name
f.write(self.tempFileContents)
f.close()
self.mock_s3 = mock_s3()
self.mock_s3.start()
def tearDown(self):
os.remove(self.tempFilePath)
self.mock_s3.stop()
def create_target(self, format=None):
client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
client.s3.create_bucket('mybucket')
return S3Target('s3://mybucket/test_file', client=client, format=format)
def test_read(self):
client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
client.s3.create_bucket('mybucket')
client.put(self.tempFilePath, 's3://mybucket/tempfile')
t = S3Target('s3://mybucket/tempfile', client=client)
read_file = t.open()
file_str = read_file.read()
self.assertEqual(self.tempFileContents, file_str.encode('utf-8'))
def test_read_no_file(self):
t = self.create_target()
self.assertRaises(FileNotFoundException, t.open)
def test_read_iterator_long(self):
# write a file that is 5X the boto buffersize
# to test line buffering
old_buffer = key.Key.BufferSize
key.Key.BufferSize = 2
try:
tempf = tempfile.NamedTemporaryFile(mode='wb', delete=False)
temppath = tempf.name
firstline = ''.zfill(key.Key.BufferSize * 5) + os.linesep
contents = firstline + 'line two' + os.linesep + 'line three'
tempf.write(contents.encode('utf-8'))
tempf.close()
client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
client.s3.create_bucket('mybucket')
client.put(temppath, 's3://mybucket/largetempfile')
t = S3Target('s3://mybucket/largetempfile', client=client)
with t.open() as read_file:
lines = [line for line in read_file]
finally:
key.Key.BufferSize = old_buffer
self.assertEqual(3, len(lines))
self.assertEqual(firstline, lines[0])
self.assertEqual("line two" + os.linesep, lines[1])
self.assertEqual("line three", lines[2])
class TestS3Client(unittest.TestCase):
def setUp(self):
f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
self.tempFilePath = f.name
f.write(b"I'm a temporary file for testing\n")
f.close()
self.mock_s3 = mock_s3()
self.mock_s3.start()
def tearDown(self):
os.remove(self.tempFilePath)
self.mock_s3.stop()
def test_init_with_environment_variables(self):
os.environ['AWS_ACCESS_KEY_ID'] = 'foo'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'bar'
        # Don't read any existing config
old_config_paths = configuration.LuigiConfigParser._config_paths
configuration.LuigiConfigParser._config_paths = [tempfile.mktemp()]
s3_client = S3Client()
configuration.LuigiConfigParser._config_paths = old_config_paths
self.assertEqual(s3_client.s3.gs_access_key_id, 'foo')
self.assertEqual(s3_client.s3.gs_secret_access_key, 'bar')
@with_config({'s3': {'aws_access_key_id': 'foo', 'aws_secret_access_key': 'bar'}})
def test_init_with_config(self):
s3_client = S3Client()
self.assertEqual(s3_client.s3.access_key, 'foo')
self.assertEqual(s3_client.s3.secret_key, 'bar')
def test_put(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
s3_client.put(self.tempFilePath, 's3://mybucket/putMe')
self.assertTrue(s3_client.exists('s3://mybucket/putMe'))
def test_put_string(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
s3_client.put_string("SOMESTRING", 's3://mybucket/putString')
self.assertTrue(s3_client.exists('s3://mybucket/putString'))
def test_put_multipart_multiple_parts_non_exact_fit(self):
"""
Test a multipart put with two parts, where the parts are not exactly the split size.
"""
# 5MB is minimum part size
part_size = (1024 ** 2) * 5
file_size = (part_size * 2) - 5000
self._run_multipart_test(part_size, file_size)
def test_put_multipart_multiple_parts_exact_fit(self):
"""
Test a multipart put with multiple parts, where the parts are exactly the split size.
"""
# 5MB is minimum part size
part_size = (1024 ** 2) * 5
file_size = part_size * 2
self._run_multipart_test(part_size, file_size)
def test_put_multipart_less_than_split_size(self):
"""
Test a multipart put with a file smaller than split size; should revert to regular put.
"""
# 5MB is minimum part size
part_size = (1024 ** 2) * 5
file_size = 5000
self._run_multipart_test(part_size, file_size)
def test_put_multipart_empty_file(self):
"""
Test a multipart put with an empty file.
"""
# 5MB is minimum part size
part_size = (1024 ** 2) * 5
file_size = 0
self._run_multipart_test(part_size, file_size)
def test_exists(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
self.assertTrue(s3_client.exists('s3://mybucket/'))
self.assertTrue(s3_client.exists('s3://mybucket'))
self.assertFalse(s3_client.exists('s3://mybucket/nope'))
self.assertFalse(s3_client.exists('s3://mybucket/nope/'))
s3_client.put(self.tempFilePath, 's3://mybucket/tempfile')
self.assertTrue(s3_client.exists('s3://mybucket/tempfile'))
self.assertFalse(s3_client.exists('s3://mybucket/temp'))
s3_client.put(self.tempFilePath, 's3://mybucket/tempdir0_$folder$')
self.assertTrue(s3_client.exists('s3://mybucket/tempdir0'))
s3_client.put(self.tempFilePath, 's3://mybucket/tempdir1/')
self.assertTrue(s3_client.exists('s3://mybucket/tempdir1'))
s3_client.put(self.tempFilePath, 's3://mybucket/tempdir2/subdir')
self.assertTrue(s3_client.exists('s3://mybucket/tempdir2'))
self.assertFalse(s3_client.exists('s3://mybucket/tempdir'))
def test_get_key(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
s3_client.put(self.tempFilePath, 's3://mybucket/key_to_find')
self.assertTrue(s3_client.get_key('s3://mybucket/key_to_find'))
self.assertFalse(s3_client.get_key('s3://mybucket/does_not_exist'))
def test_is_dir(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
self.assertTrue(s3_client.is_dir('s3://mybucket'))
s3_client.put(self.tempFilePath, 's3://mybucket/tempdir0_$folder$')
self.assertTrue(s3_client.is_dir('s3://mybucket/tempdir0'))
s3_client.put(self.tempFilePath, 's3://mybucket/tempdir1/')
self.assertTrue(s3_client.is_dir('s3://mybucket/tempdir1'))
s3_client.put(self.tempFilePath, 's3://mybucket/key')
self.assertFalse(s3_client.is_dir('s3://mybucket/key'))
def test_remove(self):
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
self.assertRaises(
S3ResponseError,
lambda: s3_client.remove('s3://bucketdoesnotexist/file')
)
self.assertFalse(s3_client.remove('s3://mybucket/doesNotExist'))
s3_client.put(self.tempFilePath, 's3://mybucket/existingFile0')
self.assertTrue(s3_client.remove('s3://mybucket/existingFile0'))
self.assertFalse(s3_client.exists('s3://mybucket/existingFile0'))
self.assertRaises(
InvalidDeleteException,
lambda: s3_client.remove('s3://mybucket/')
)
self.assertRaises(
InvalidDeleteException,
lambda: s3_client.remove('s3://mybucket')
)
s3_client.put(self.tempFilePath, 's3://mybucket/removemedir/file')
self.assertRaises(
InvalidDeleteException,
lambda: s3_client.remove('s3://mybucket/removemedir', recursive=False)
)
def _run_multipart_test(self, part_size, file_size):
file_contents = b"a" * file_size
s3_path = 's3://mybucket/putMe'
tmp_file = tempfile.NamedTemporaryFile(mode='wb', delete=True)
tmp_file_path = tmp_file.name
tmp_file.write(file_contents)
tmp_file.flush()
s3_client = S3Client(AWS_ACCESS_KEY, AWS_SECRET_KEY)
s3_client.s3.create_bucket('mybucket')
s3_client.put_multipart(tmp_file_path, s3_path, part_size=part_size)
self.assertTrue(s3_client.exists(s3_path))
# b/c of https://github.com/spulec/moto/issues/131 have to
# get contents to check size
key_contents = s3_client.get_key(s3_path).get_contents_as_string()
self.assertEqual(len(file_contents), len(key_contents))
tmp_file.close()
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "c4577248756e56162bb443e0566eb570",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 95,
"avg_line_length": 38.00754716981132,
"alnum_prop": 0.6320492454328832,
"repo_name": "glenndmello/luigi",
"id": "45f1ccd8357e7de877415a910ac290bee184184c",
"size": "10675",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "test/s3_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10980"
},
{
"name": "JavaScript",
"bytes": "40245"
},
{
"name": "Python",
"bytes": "964982"
},
{
"name": "Shell",
"bytes": "2297"
}
],
"symlink_target": ""
} |
"""Elastic Search indexes for Data model."""
import elasticsearch_dsl as dsl
from resolwe.elastic.fields import Name, ProcessType
from resolwe.elastic.indices import BaseIndex
from ..models import Data
from .base import BaseDocument, BaseIndexMixin
class DataDocument(BaseDocument):
"""Document for data search."""
started = dsl.Date()
finished = dsl.Date()
status = dsl.Keyword()
process = dsl.Integer()
process_type = ProcessType()
# Keep backward compatibility.
type = ProcessType() # pylint: disable=invalid-name
process_name = Name()
tags = dsl.Keyword(multi=True)
collection = dsl.Integer(multi=True)
parents = dsl.Integer(multi=True)
children = dsl.Integer(multi=True)
entity = dsl.Integer(multi=True)
class Meta:
"""Meta class for data search document."""
index = 'data'
class DataIndex(BaseIndexMixin, BaseIndex):
"""Index for data objects used in ``DataDocument``."""
queryset = Data.objects.all().prefetch_related(
'process',
'contributor'
)
object_type = Data
document_class = DataDocument
mapping = {
'process': 'process.id',
'process_name': 'process.name',
'process_type': 'process.type',
'type': 'process.type',
}
def get_dependencies(self):
"""Return dependencies, which should trigger updates of this model."""
# pylint: disable=no-member
return super().get_dependencies() + [
Data.collection_set,
Data.entity_set,
Data.parents,
]
def get_collection_value(self, obj):
"""Extract collections this object is in."""
return list(obj.collection_set.values_list('pk', flat=True))
def get_parents_value(self, obj):
"""Extract parents."""
return list(obj.parents.values_list('pk', flat=True))
def get_children_value(self, obj):
"""Extract children."""
return list(obj.children.values_list('pk', flat=True))
def get_entity_value(self, obj):
"""Extract entities."""
return list(obj.entity_set.values_list('pk', flat=True))
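# Editor's note (assumption about the BaseIndex contract, which is defined
# elsewhere in resolwe.elastic, not in this file): entries in ``mapping`` are
# resolved as attribute paths on the Data object (e.g. 'process.name'), while
# fields with a ``get_<field>_value`` method above use that method instead, so
# the related objects are flattened into plain lists of primary keys.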
| {
"content_hash": "09d24bfd5a7c75100664bf3f0fd6d0ee",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 78,
"avg_line_length": 28.69333333333333,
"alnum_prop": 0.6305762081784386,
"repo_name": "jberci/resolwe",
"id": "d3f50f40d92eb90b58df6987739b22d1eab92af8",
"size": "2152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resolwe/flow/elastic_indexes/data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "385"
},
{
"name": "Python",
"bytes": "1089792"
},
{
"name": "Shell",
"bytes": "5744"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='base.html')),
url(r'^admin/', include(admin.site.urls)),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| {
"content_hash": "dda63c216279781599ab92b9211579ce",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 64,
"avg_line_length": 26,
"alnum_prop": 0.6923076923076923,
"repo_name": "donnex/django-docker-coookiecutter",
"id": "007efac1b8e1a22e52bbe19fdebc574979799f64",
"size": "416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{ cookiecutter.repo_name }}/django/{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1039"
},
{
"name": "Dockerfile",
"bytes": "466"
},
{
"name": "HTML",
"bytes": "2275"
},
{
"name": "JavaScript",
"bytes": "2310"
},
{
"name": "Python",
"bytes": "4453"
}
],
"symlink_target": ""
} |
import sys, urllib, urllib2, time, os, math
import httplib
try:
from optparse import OptionParser
except ImportError:
OptionParser = False
# setting this to True trades the more useful error messages for privacy,
# hiding URLs and error details.
HIDE_ALL = False
class WMS (object):
fields = ("bbox", "srs", "width", "height", "format", "layers", "styles")
defaultParams = {'version': '1.1.1', 'request': 'GetMap', 'service': 'WMS'}
__slots__ = ("base", "params", "client", "data", "response")
def __init__ (self, base, params, user=None, password=None):
self.base = base
if self.base[-1] not in "?&":
if "?" in self.base:
self.base += "&"
else:
self.base += "?"
self.params = {}
if user is not None and password is not None:
x = urllib2.HTTPPasswordMgrWithDefaultRealm()
x.add_password(None, base, user, password)
self.client = urllib2.build_opener()
auth = urllib2.HTTPBasicAuthHandler(x)
self.client = urllib2.build_opener(auth)
else:
self.client = urllib2.build_opener()
for key, val in self.defaultParams.items():
if self.base.lower().rfind("%s=" % key.lower()) == -1:
self.params[key] = val
for key in self.fields:
if params.has_key(key):
self.params[key] = params[key]
elif self.base.lower().rfind("%s=" % key.lower()) == -1:
self.params[key] = ""
def url (self):
return self.base + urllib.urlencode(self.params)
def fetch (self):
urlrequest = urllib2.Request(self.url())
# urlrequest.add_header("User-Agent",
# "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)" )
response = None
while response is None:
try:
response = self.client.open(urlrequest)
data = response.read()
# check to make sure that we have an image...
msg = response.info()
if msg.has_key("Content-Type"):
ctype = msg['Content-Type']
if ctype[:5].lower() != 'image':
if HIDE_ALL:
raise Exception("Did not get image data back. (Adjust HIDE_ALL for more detail.)")
else:
raise Exception("Did not get image data back. \nURL: %s\nContent-Type Header: %s\nResponse: \n%s" % (self.url(), ctype, data))
except httplib.BadStatusLine:
response = None # try again
return data, response
def setBBox (self, box):
self.params["bbox"] = ",".join(map(str, box))
def seed (svc, layer, levels = (0, 5), bbox = None, padding = 0, force = False, reverse = False ):
from Layer import Tile
try:
padding = int(padding)
except:
raise Exception('Your padding parameter is %s, but should be an integer' % padding)
if not bbox: bbox = layer.bbox
start = time.time()
total = 0
for z in range(*levels):
bottomleft = layer.getClosestCell(z, bbox[0:2])
topright = layer.getClosestCell(z, bbox[2:4])
# Why Are we printing to sys.stderr??? It's not an error.
# This causes a termination if run from cron or in background if shell is terminated
#print >>sys.stderr, "###### %s, %s" % (bottomleft, topright)
print "###### %s, %s" % (bottomleft, topright)
zcount = 0
metaSize = layer.getMetaSize(z)
ztiles = int(math.ceil(float(topright[1] - bottomleft[1]) / metaSize[0]) * math.ceil(float(topright[0] - bottomleft[0]) / metaSize[1]))
if reverse:
startX = topright[0] + metaSize[0] + (1 * padding)
endX = bottomleft[0] - (1 * padding)
stepX = -metaSize[0]
startY = topright[1] + metaSize[1] + (1 * padding)
endY = bottomleft[1] - (1 * padding)
stepY = -metaSize[1]
else:
startX = bottomleft[0] - (1 * padding)
endX = topright[0] + metaSize[0] + (1 * padding)
stepX = metaSize[0]
startY = bottomleft[1] - (1 * padding)
endY = topright[1] + metaSize[1] + (1 * padding)
stepY = metaSize[1]
for y in range(startY, endY, stepY):
for x in range(startX, endX, stepX):
tileStart = time.time()
tile = Tile(layer,x,y,z)
bounds = tile.bounds()
svc.renderTile(tile,force=force)
total += 1
zcount += 1
box = "(%.4f %.4f %.4f %.4f)" % bounds
print "%02d (%06d, %06d) = %s [%.4fs : %.3f/s] %s/%s" \
% (z,x,y, box, time.time() - tileStart, total / (time.time() - start + .0001), zcount, ztiles)
def main ():
if not OptionParser:
raise Exception("TileCache seeding requires optparse/OptionParser. Your Python may be too old.\nSend email to the mailing list \n(http://openlayers.org/mailman/listinfo/tilecache) about this problem for help.")
usage = "usage: %prog <layer> [<zoom start> <zoom stop>]"
parser = OptionParser(usage=usage, version="%prog (2.10)")
parser.add_option("-f","--force", action="store_true", dest="force", default = False,
help="force recreation of tiles even if they are already in cache")
parser.add_option("-b","--bbox",action="store", type="string", dest="bbox", default = None,
help="restrict to specified bounding box")
parser.add_option("-p","--padding",action="store", type="int", dest="padding", default = 0,
help="extra margin tiles to seed around target area. Defaults to 0 "+
"(some edge tiles might be missing). A value of 1 ensures all tiles "+
"will be created, but some tiles may be wholly outside your bbox")
parser.add_option("-r","--reverse", action="store_true", dest="reverse", default = False,
help="Reverse order of seeding tiles")
(options, args) = parser.parse_args()
if len(args)>3:
parser.error("Incorrect number of arguments. bbox and padding are now options (-b and -p)")
from Service import Service, cfgfiles
from Layer import Layer
svc = Service.load(*cfgfiles)
layer = svc.layers[args[0]]
if options.bbox:
bboxlist = map(float,options.bbox.split(","))
else:
bboxlist=None
if len(args)>1:
seed(svc, layer, map(int, args[1:3]), bboxlist , padding=options.padding, force = options.force, reverse = options.reverse)
else:
for line in sys.stdin.readlines():
lat, lon, delta = map(float, line.split(","))
bbox = (lon - delta, lat - delta, lon + delta, lat + delta)
print "===> %s <===" % (bbox,)
seed(svc, layer, (5, 17), bbox , force = options.force )
if __name__ == '__main__':
main()
| {
"content_hash": "72ebcbead7532f626db1e03ab198749f",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 218,
"avg_line_length": 42.74251497005988,
"alnum_prop": 0.5462314373774166,
"repo_name": "DistributedOpenUnifiedGovernmentNetwork/mapwarper",
"id": "b2e05f94349a84309eeaedf1873e1a038d4705a0",
"size": "7218",
"binary": false,
"copies": "7",
"ref": "refs/heads/ruby1.9.1",
"path": "publicoldtestsite/cgi/tilecache/TileCache/Client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "684811"
},
{
"name": "Groff",
"bytes": "8"
},
{
"name": "HTML",
"bytes": "1750094"
},
{
"name": "JavaScript",
"bytes": "513548"
},
{
"name": "Makefile",
"bytes": "4734"
},
{
"name": "Python",
"bytes": "304410"
},
{
"name": "Ruby",
"bytes": "1108095"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
} |
import copy
import os
from django.utils.log import DEFAULT_LOGGING
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# Application definition
INSTALLED_APPS = [
'wagtail.wagtailusers',
'wagtail.wagtailimages',
'wagtail.wagtailsearch',
'wagtail.wagtailadmin',
'wagtail.wagtailcore',
'wagtail.contrib.modeladmin',
'wagtail.wagtaildocs', # required by modeladmin
'modelcluster',
'taggit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'privagal.core',
'privagal.timeline',
'privagal.gallery',
]
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware', # Required but not needed...
'privagal.core.middleware.AuthTokenMiddleware',
]
ROOT_URLCONF = 'privagal.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.i18n',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'privagal.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = (
os.path.join(PROJECT_DIR, 'locale'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, 'static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Just to be easily override by children conf files.
LOGGING = copy.deepcopy(DEFAULT_LOGGING)
# Wagtail settings
WAGTAIL_SITE_NAME = "privagal"
WAGTAILIMAGES_JPEG_QUALITY = 85
WAGTAILIMAGES_IMAGE_MODEL = 'wagtailimages.image'
PASSWORD_REQUIRED_TEMPLATE = 'password_required.html'
# Privagal settings
PRIVAGAL_TIMELINE_INITIAL_PASSWORD = None # required
PRIVAGAL_SITE_HOSTNAME = '127.0.0.1' # Assume localhost for dev and test
PRIVAGAL_AUTH_TOKEN_REQUIRED = False # Allow page password form by default
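# Example override (editor's sketch; the module layout and values below are
# assumptions, not part of this project):
#
#     # privagal/settings/production.py
#     from .base import *  # noqa
#     PRIVAGAL_TIMELINE_INITIAL_PASSWORD = 'change-me'
#     PRIVAGAL_SITE_HOSTNAME = 'gallery.example.com'
#     PRIVAGAL_AUTH_TOKEN_REQUIRED = True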
| {
"content_hash": "6fc4f2d2354256268b596a212d006981",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 82,
"avg_line_length": 25.966666666666665,
"alnum_prop": 0.6903080872913993,
"repo_name": "ychab/privagal",
"id": "af43f17f6af1b80562e27a1a3c8ba455fc793b51",
"size": "3116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "privagal/settings/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "396"
},
{
"name": "HTML",
"bytes": "8727"
},
{
"name": "JavaScript",
"bytes": "3460"
},
{
"name": "Python",
"bytes": "64892"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import warnings
warnings.warn("DEAD: Broken module", UserWarning, stacklevel=2)
DEAD = True
if not DEAD:
import re
import six
from gherkin.formatter import filters
re_type = type(re.compile(''))
class FilterError(Exception):
pass
class FilterFormatter(object):
def __init__(self, formatter, filters):
self.formatter = formatter
self.filter = self.detect_filters(filters)
self._feature_tags = []
self._feature_element_tags = []
self._examples_tags = []
self._feature_events = []
self._background_events = []
self._feature_element_events = []
self._examples_events = []
self._feature_name = None
self._feature_element_name = None
self._examples_name = None
self._feature_element_range = None
self._examples_range = None
def uri(self, uri):
self.formatter.uri(uri)
def feature(self, feature):
self._feature_tags = feature.tags
self._feature_name = feature.name
self._feature_events = [feature]
def background(self, background):
self._feature_element_name = background.name
self._feature_element_range = background.line_range()
self._background_events = [background]
def scenario(self, scenario):
self.replay()
self._feature_element_tags = scenario.tags
self._feature_element_name = scenario.name
self._feature_element_range = scenario.line_range()
self._feature_element_events = [scenario]
def scenario_outline(self, scenario_outline):
self.replay()
self._feature_element_tags = scenario_outline.tags
self._feature_element_name = scenario_outline.name
self._feature_element_range = scenario_outline.line_range()
self._feature_element_events = [scenario_outline]
def examples(self, examples):
self.replay()
self._examples_tags = examples.tags
self._examples_name = examples.name
if len(examples.rows) == 0:
table_body_range = (examples.line_range()[1],
examples.line_range()[1])
elif len(examples.rows) == 1:
table_body_range = (examples.rows[0].line, examples.rows[0].line)
else:
table_body_range = (examples.rows[1].line, examples.rows[-1].line)
self._examples_range = [examples.line_range()[0], table_body_range[1]]
if self.filter.eval([], [], [table_body_range]):
examples.rows = self.filter.filter_table_body_rows(examples.rows)
self._examples_events = [examples]
def step(self, step):
if len(self._feature_element_events) > 0:
self._feature_element_events.append(step)
else:
self._background_events.append(step)
self._feature_element_range = (self._feature_element_range[0],
step.line_range()[1])
def eof(self):
self.replay()
self.formatter.eof()
def detect_filters(self, filter_list):
filter_classes = set([type(f) for f in filter_list])
if len(filter_classes) > 1 and filter_classes != set([str, six.text_type]):
message = "Inconsistent filters: %r" % (filter_list, )
raise FilterError(message)
if type(filter_list[0]) == int:
return filters.LineFilter(filter_list)
if type(filter_list[0]) == re_type:
return filters.RegexpFilter(filter_list)
return filters.TagFilter(filter_list)
def replay(self):
tags = self._feature_tags + self._feature_element_tags
names = [self._feature_name, self._feature_element_name]
ranges = [self._feature_element_range]
feature_element_ok = self.filter.eval(
tags,
[n for n in names if n is not None],
[r for r in ranges if r is not None],
)
examples_ok = self.filter.eval(
tags + self._examples_tags,
[n for n in names + [self._examples_name] if n is not None],
[r for r in ranges + [self._examples_range] if r is not None],
)
if feature_element_ok or examples_ok:
self.replay_events(self._feature_events)
self.replay_events(self._background_events)
self.replay_events(self._feature_element_events)
if examples_ok:
self.replay_events(self._examples_events)
self._examples_events[:] = []
self._examples_tags[:] = []
self._examples_name = None
def replay_events(self, events):
for event in events:
event.replay(self.formatter)
events[:] = []
| {
"content_hash": "21ae8d73ec735d620c14919f55b1a69a",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 87,
"avg_line_length": 35.875,
"alnum_prop": 0.5472319008904375,
"repo_name": "allanlewis/behave",
"id": "858a60af4a7f0525565477677586efea9021d2a3",
"size": "5200",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "behave/formatter/__filter_formatter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "272"
},
{
"name": "Cucumber",
"bytes": "588345"
},
{
"name": "Python",
"bytes": "758627"
},
{
"name": "Shell",
"bytes": "856"
}
],
"symlink_target": ""
} |
from django.db import models
class DNSIncrementalManager(models.Manager):
def get_query_set(self):
return super(DNSIncrementalManager, self).get_query_set().filter(ttype='dns-incremental') # noqa
class DNSFullManager(models.Manager):
def get_query_set(self):
return super(DNSFullManager, self).get_query_set().filter(ttype='dns-full') # noqa
class Task(models.Model):
task = models.CharField(max_length=255, blank=False)
ttype = models.CharField(max_length=255, blank=False)
objects = models.Manager()
dns_incremental = DNSIncrementalManager()
dns_full = DNSFullManager()
@classmethod
def schedule_zone_rebuild(cls, soa):
"""
Schedules a rebuild that only changes zone file contents and *not*
        config contents. Operations that cannot possibly change the presence
        of a zone statement in the config file should use this rebuild type.
"""
Task(task=str(soa.pk), ttype='dns-incremental').save()
@classmethod
def schedule_all_dns_rebuild(cls, soa):
"""
Schedules a rebuild for a zone and also regenerates the global zone
        config. This type of rebuild is reserved for operations that would
cause a zone to be removed or added to any config file.
"""
Task(task=str(soa.pk), ttype='dns-full').save()
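    # Usage sketch (editor's addition; `soa` stands for the SOA object of the
    # zone that changed and is not defined in this module):
    #
    #     Task.schedule_zone_rebuild(soa)      # zone contents changed
    #     Task.schedule_all_dns_rebuild(soa)   # zone added to/removed from config
    #
    # A consumer would then drain Task.dns_incremental / Task.dns_full
    # respectively to pick up the queued rebuilds.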
def __repr__(self):
return "<Task: {0}>".format(self)
def __str__(self):
return "{0} {1}".format(self.ttype, self.task)
def save(self):
super(Task, self).save()
class Meta:
db_table = u'task'
ordering = ['task']
| {
"content_hash": "7cbb40b5d21ecdc03c0d50eed30d848c",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 105,
"avg_line_length": 31.980392156862745,
"alnum_prop": 0.6468424279583078,
"repo_name": "rtucker-mozilla/inventory",
"id": "a469a5ddabb405c49b83a9cc392c843fc23309a1",
"size": "1631",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "core/task/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5104"
},
{
"name": "CSS",
"bytes": "362837"
},
{
"name": "CoffeeScript",
"bytes": "9538"
},
{
"name": "HTML",
"bytes": "1195738"
},
{
"name": "JavaScript",
"bytes": "1530665"
},
{
"name": "Makefile",
"bytes": "14421"
},
{
"name": "PHP",
"bytes": "27273"
},
{
"name": "Python",
"bytes": "3642241"
},
{
"name": "Shell",
"bytes": "1783"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
from flask import jsonify
from flask_restplus import inputs
from flexget.api import api, APIResource
from flexget.api.app import etag, BadRequest, NotFoundError
from flexget.plugin import get_plugin_by_name
tmdb_api = api.namespace('tmdb', description='TMDB lookup endpoint')
class ObjectsContainer(object):
poster_object = {
'type': 'object',
'properties': {
'id': {'type': ['integer', 'null']},
'movie_id': {'type': ['integer', 'null']},
'urls': {'type': 'object'},
'file_path': {'type': 'string'},
'width': {'type': 'integer'},
'height': {'type': 'integer'},
'aspect_ratio': {'type': 'number'},
'vote_average': {'type': 'number'},
'vote_count': {'type': 'integer'},
'language_code': {'type': ['string', 'null']}
},
'required': ['id', 'movie_id', 'urls', 'file_path', 'width', 'height', 'aspect_ratio', 'vote_average',
'vote_count', 'language_code'],
'additionalProperties': False
}
movie_object = {
'type': 'object',
'properties': {
'id': {'type': 'integer'},
'imdb_id': {'type': 'string'},
'name': {'type': 'string'},
'original_name': {'type': ['string', 'null']},
'alternative_name': {'type': ['string', 'null']},
'year': {'type': 'integer'},
'runtime': {'type': 'integer'},
'language': {'type': 'string'},
'overview': {'type': 'string'},
'tagline': {'type': 'string'},
'rating': {'type': ['number', 'null']},
'votes': {'type': ['integer', 'null']},
'popularity': {'type': ['number', 'null']},
'adult': {'type': 'boolean'},
'budget': {'type': ['integer', 'null']},
'revenue': {'type': ['integer', 'null']},
'homepage': {'type': ['string', 'null'], 'format': 'uri'},
'posters': {'type': 'array', 'items': poster_object},
'backdrops': {'type': 'array', 'items': poster_object},
'genres': {'type': 'array', 'items': {'type': 'string'}},
'updated': {'type': 'string', 'format': 'date-time'},
},
'required': ['id', 'name', 'year', 'original_name', 'alternative_name', 'runtime', 'language',
'overview', 'tagline', 'rating', 'votes', 'popularity', 'adult', 'budget', 'revenue', 'homepage',
'genres', 'updated'],
'additionalProperties': False
}
description = 'Either title, TMDB ID or IMDB ID are required for a lookup'
return_schema = api.schema('tmdb_search_schema', ObjectsContainer.movie_object)
tmdb_parser = api.parser()
tmdb_parser.add_argument('title', help='Movie title')
tmdb_parser.add_argument('tmdb_id', help='TMDB ID')
tmdb_parser.add_argument('imdb_id', help='IMDB ID')
tmdb_parser.add_argument('year', type=int, help='Movie year')
tmdb_parser.add_argument('only_cached', type=int, help='Return only cached results')
tmdb_parser.add_argument('include_posters', type=inputs.boolean, default=False, help='Include posters in response')
tmdb_parser.add_argument('include_backdrops', type=inputs.boolean, default=False, help='Include backdrops in response')
@tmdb_api.route('/movies/')
@api.doc(description=description)
class TMDBMoviesAPI(APIResource):
@etag
@api.response(200, model=return_schema)
@api.response(NotFoundError)
@api.response(BadRequest)
@api.doc(parser=tmdb_parser)
def get(self, session=None):
""" Get TMDB movie data """
args = tmdb_parser.parse_args()
title = args.get('title')
tmdb_id = args.get('tmdb_id')
imdb_id = args.get('imdb_id')
posters = args.pop('include_posters', False)
backdrops = args.pop('include_backdrops', False)
if not (title or tmdb_id or imdb_id):
raise BadRequest(description)
lookup = get_plugin_by_name('api_tmdb').instance.lookup
try:
movie = lookup(session=session, **args)
except LookupError as e:
raise NotFoundError(e.args[0])
return_movie = movie.to_dict()
if posters:
return_movie['posters'] = [p.to_dict() for p in movie.posters]
if backdrops:
return_movie['backdrops'] = [p.to_dict() for p in movie.backdrops]
return jsonify(return_movie)
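# Illustrative request (editor's sketch; the URL prefix and values are
# examples only and depend on how the API is mounted):
#
#     GET /tmdb/movies/?title=Inception&year=2010&include_posters=true
#
# returns the movie JSON described by ObjectsContainer.movie_object, with
# 'posters' populated because include_posters was set.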
| {
"content_hash": "38ac90c496e4a224184ff8a8b1345417",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 119,
"avg_line_length": 39.675438596491226,
"alnum_prop": 0.5604687154543445,
"repo_name": "sean797/Flexget",
"id": "bb2660f9c1416a90bbb819c1ed7b58d2639e00e0",
"size": "4523",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/api/plugins/tmdb_lookup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56725"
},
{
"name": "HTML",
"bytes": "35670"
},
{
"name": "JavaScript",
"bytes": "455222"
},
{
"name": "Python",
"bytes": "2170588"
}
],
"symlink_target": ""
} |
import random
from direct.directnotify import DirectNotifyGlobal
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import ClockDelta
from direct.interval.IntervalGlobal import *
from direct.task import Task
from otp.ai.AIBase import *
from otp.level import BasicEntities
from otp.level import DistributedEntityAI
from toontown.coghq import BattleBlockerAI
from toontown.coghq import LaserGameMineSweeper
from toontown.coghq import LaserGameRoll
class DistributedMoverAI(DistributedEntityAI.DistributedEntityAI, NodePath, BasicEntities.NodePathAttribs):
def __init__(self, level, entId):
DistributedEntityAI.DistributedEntityAI.__init__(self, level, entId)
node = hidden.attachNewNode('DistributedMoverAI')
NodePath.__init__(self, node)
if not hasattr(self, 'switchId'):
self.switchId = 0
if not hasattr(self, 'pos0Wait'):
self.pos0Wait = 1.0
if not hasattr(self, 'pos0Move'):
self.pos0Move = 1.0
if not hasattr(self, 'pos1Wait'):
self.pos1Wait = 1.0
if not hasattr(self, 'pos1Move'):
self.pos1Move = 1.0
if not hasattr(self, 'startOn'):
self.startOn = 0
if not hasattr(self, 'cycleType'):
self.cycleType = 'return'
self.moveTime = {}
self.setTimes()
self.oK2Play = 1
def generate(self):
DistributedEntityAI.DistributedEntityAI.generate(self)
if self.switchId != 0:
self.accept(self.getOutputEventName(self.switchId), self.reactToSwitch)
self.timerName = 'mover %s' % self.doId
self.setPos(self.pos)
self.setHpr(self.hpr)
self.setTimes()
if self.startOn:
self.sendMove()
def delete(self):
taskMgr.remove(self.timerName)
self.ignoreAll()
DistributedEntityAI.DistributedEntityAI.delete(self)
def destroy(self):
self.notify.info('destroy entity(laserField) %s' % self.entId)
DistributedEntityAI.DistributedEntityAI.destroy(self)
def reactToSwitch(self, on):
if on:
self.sendMove()
def setPos0Move(self, time):
self.pos0Move = time
self.setTimes()
def setPos1Move(self, time):
self.pos1Move = time
self.setTimes()
def setPos0Wait(self, time):
self.pos0Wait = time
self.setTimes()
def setPos1Wait(self, time):
self.pos1Wait = time
self.setTimes()
def setTimes(self):
self.moveTime = { }
self.moveTime['return'] = self.pos0Move + self.pos1Wait + self.pos1Move
self.moveTime['loop'] = self.pos0Wait + self.pos0Move + self.pos1Wait + self.pos1Move
self.moveTime['oneWay'] = self.pos0Move
self.moveTime['linear'] = self.pos0Move * 8
def setCycleType(self, type):
self.cycleType = type
self.oK2Play = 1
def setStartOn(self, on):
self.startOn = on
self.sendMove()
def sendMove(self):
timeStamp = ClockDelta.globalClockDelta.getRealNetworkTime()
if self.oK2Play:
self.sendUpdate('startMove', [timeStamp])
taskMgr.doMethodLater(self.moveTime[self.cycleType], self._DistributedMoverAI__resetTimer, self.timerName)
self.oK2Play = 0
def _DistributedMoverAI__resetTimer(self, taskMgrFooler = 1):
if not self.cycleType == 'oneWay':
self.oK2Play = 1
if self.cycleType in ('loop', 'linear') or self.startOn:
self.sendMove()
return Task.done
| {
"content_hash": "f8a93fc1944ce4a2527ec74acf430794",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 118,
"avg_line_length": 33.51401869158879,
"alnum_prop": 0.6422197434467373,
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"id": "aa099c8b265c4cacfa929e70467b933ba28219f9",
"size": "3586",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "toontown/coghq/DistributedMoverAI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1703277"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "5468044"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "4611"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Objective-C",
"bytes": "23212"
},
{
"name": "Puppet",
"bytes": "5245"
},
{
"name": "Python",
"bytes": "34010215"
},
{
"name": "Shell",
"bytes": "11192"
},
{
"name": "Tcl",
"bytes": "1981257"
}
],
"symlink_target": ""
} |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=1, faild_point=100000, path_list=[
[TestAction.add_image, 'image1', 'root', os.environ.get('isoForVmUrl')],
[TestAction.create_vm_by_image, 'image1', 'iso', 'vm1', 'cluster=cluster2'],
[TestAction.create_vm_backup, 'vm1', 'vm1-backup1'],
[TestAction.start_vm, 'vm1'],
[TestAction.stop_vm, 'vm1'],
[TestAction.create_volume, 'volume1', 'cluster=cluster2', 'flag=scsi'],
[TestAction.resize_data_volume, 'volume1', 5*1024*1024],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.create_mini_vm, 'vm2', 'cluster=cluster1'],
[TestAction.create_volume, 'volume2', 'cluster=cluster1', 'flag=scsi'],
[TestAction.attach_volume, 'vm2', 'volume2'],
[TestAction.create_volume, 'volume3', 'cluster=cluster2', 'flag=thin,scsi'],
[TestAction.add_image, 'image2', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.use_vm_backup, 'vm1-backup1'],
[TestAction.delete_image, 'image2'],
[TestAction.delete_image, 'image1'],
[TestAction.expunge_image, 'image1'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.start_vm, 'vm1'],
[TestAction.create_volume_backup, 'volume1', 'volume1-backup2'],
[TestAction.stop_vm, 'vm1'],
[TestAction.change_vm_ha, 'vm1'],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.resize_data_volume, 'volume2', 5*1024*1024],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.detach_volume, 'volume3'],
[TestAction.delete_volume, 'volume2'],
[TestAction.recover_volume, 'volume2'],
[TestAction.start_vm, 'vm2'],
[TestAction.create_vm_backup, 'vm2', 'vm2-backup3'],
[TestAction.stop_vm, 'vm2'],
[TestAction.use_vm_backup, 'vm2-backup3'],
[TestAction.create_mini_vm, 'vm3', 'cluster=cluster1', 'flag=thin'],
[TestAction.delete_volume, 'volume2'],
[TestAction.expunge_volume, 'volume2'],
[TestAction.destroy_vm, 'vm3'],
[TestAction.recover_vm, 'vm3'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.create_volume_backup, 'volume3', 'volume3-backup4'],
[TestAction.create_volume, 'volume4', 'cluster=cluster1', 'flag=scsi'],
[TestAction.resize_data_volume, 'volume4', 5*1024*1024],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.change_vm_ha, 'vm1'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_volume_backup, 'volume3-backup4'],
[TestAction.start_vm, 'vm1'],
[TestAction.change_vm_ha, 'vm1'],
])
'''
The final status:
Running:['vm1']
Stopped:['vm2', 'vm3']
Enabled:['vm1-backup1', 'volume1-backup2', 'vm2-backup3', 'volume3-backup4']
attached:['volume1', 'volume3']
Detached:['volume4']
Deleted:['image2']
Expunged:['volume2', 'image1']
Ha:['vm1']
Group:
vm_backup2:['vm2-backup3']---vm2@
vm_backup1:['vm1-backup1']---vm1@
''' | {
"content_hash": "b2023886f7938d7f066501cf10e81dfc",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 104,
"avg_line_length": 40.138888888888886,
"alnum_prop": 0.6878892733564014,
"repo_name": "zstackio/zstack-woodpecker",
"id": "527f3ead4dd927235e5daa0f2a173a79c5504951",
"size": "2890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrationtest/vm/mini/multiclusters/paths/multi_path4.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
} |
from praw.models import User
from .. import UnitTest
class TestUser(UnitTest):
def test_me__in_read_only_mode(self):
assert self.reddit.read_only
user = User(self.reddit)
assert user.me() is None
| {
"content_hash": "0558efe5a3669dcad03f4189fa9d8dd5",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 41,
"avg_line_length": 22.7,
"alnum_prop": 0.6607929515418502,
"repo_name": "leviroth/praw",
"id": "1542fb46310fafea9c7fba9f0838902e999f8e35",
"size": "227",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/unit/models/test_user.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "513471"
},
{
"name": "Shell",
"bytes": "189"
}
],
"symlink_target": ""
} |
from cms.models import Page
from cms.models.titlemodels import Title
from cms.utils import i18n
from collections import defaultdict
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.db.models.signals import post_save, post_delete
from django.utils import translation
from django.utils.datastructures import SortedDict
from django.utils.safestring import mark_safe
def update_site_and_page_choices(lang=None):
lang = lang or translation.get_language()
SITE_CHOICES_KEY = get_site_cache_key(lang)
PAGE_CHOICES_KEY = get_page_cache_key(lang)
if settings.CMS_MODERATOR:
title_queryset = Title.objects.filter(page__publisher_is_draft=False)
else:
title_queryset = Title.objects.filter(page__publisher_is_draft=True)
title_queryset = title_queryset.select_related('page', 'page__site').order_by('page__tree_id', 'page__lft', 'page__rght')
pages = defaultdict(SortedDict)
sites = {}
for title in title_queryset:
page = pages[title.page.site.pk].get(title.page.pk, {})
page[title.language] = title
pages[title.page.site.pk][title.page.pk] = page
sites[title.page.site.pk] = title.page.site.name
site_choices = []
page_choices = [('', '----')]
language_order = [lang] + i18n.get_fallback_languages(lang)
for sitepk, sitename in sites.items():
site_choices.append((sitepk, sitename))
site_page_choices = []
for titles in pages[sitepk].values():
title = None
for language in language_order:
title = titles.get(language)
if title:
break
if not title:
continue
indent = u" " * title.page.level
page_title = mark_safe(u"%s%s" % (indent, title.title))
site_page_choices.append((title.page.pk, page_title))
page_choices.append((sitename, site_page_choices))
# We set it to 1 day here because we actively invalidate this cache.
cache.set(SITE_CHOICES_KEY, site_choices, 86400)
cache.set(PAGE_CHOICES_KEY, page_choices, 86400)
return site_choices, page_choices
def get_site_choices(lang=None):
lang = lang or translation.get_language()
site_choices = cache.get(get_site_cache_key(lang))
if site_choices is None:
site_choices, page_choices = update_site_and_page_choices(lang)
return site_choices
def get_page_choices(lang=None):
lang = lang or translation.get_language()
page_choices = cache.get(get_page_cache_key(lang))
if page_choices is None:
site_choices, page_choices = update_site_and_page_choices(lang)
return page_choices
def _get_key(prefix, lang):
return "%s-%s" % (prefix, lang)
def get_site_cache_key(lang):
return _get_key(settings.CMS_SITE_CHOICES_CACHE_KEY, lang)
def get_page_cache_key(lang):
return _get_key(settings.CMS_PAGE_CHOICES_CACHE_KEY, lang)
def _clean_many(prefix):
keys = []
for lang in [l[0] for l in settings.LANGUAGES]:
keys.append(_get_key(prefix, lang))
cache.delete_many(keys)
def clean_site_choices_cache(sender, **kwargs):
_clean_many(settings.CMS_SITE_CHOICES_CACHE_KEY)
def clean_page_choices_cache(sender, **kwargs):
_clean_many(settings.CMS_PAGE_CHOICES_CACHE_KEY)
post_save.connect(clean_page_choices_cache, sender=Page)
post_save.connect(clean_site_choices_cache, sender=Site)
post_delete.connect(clean_page_choices_cache, sender=Page)
post_delete.connect(clean_site_choices_cache, sender=Site)
| {
"content_hash": "4271d1f3c6b3ce175c30fb164763ab24",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 125,
"avg_line_length": 37.19387755102041,
"alnum_prop": 0.6710562414266118,
"repo_name": "chrisglass/django-cms",
"id": "f40a7631780a46081b0d63d256f479f8ffc83418",
"size": "3669",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "cms/forms/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "521444"
},
{
"name": "Python",
"bytes": "1947498"
}
],
"symlink_target": ""
} |
DEFAULT_BAUDRATE = 38400
import sys
if (hasattr(sys, 'implementation') and
sys.implementation.name == 'micropython'):
# if using pyBoard
from pyb import UART as uart_base
else:
from serial import Serial as uart_base
from obdlib.logging import logger
class UART(object):
def __init__(self):
self.bus_name = uart_base.__name__
self.bus = None
self.map = {}
def connection(self, port, baudrate=DEFAULT_BAUDRATE):
try:
self.bus = uart_base(port, baudrate)
self._mapping()
except Exception as err:
# logging exception
logger.error(err)
return None
return self
def __getattr__(self, item):
def args_wrapper(*args, **kwargs):
try:
response = getattr(self.bus, item)(*args, **kwargs)
except AttributeError:
response = self._invoke_mapping(item, *args, **kwargs)
return response
return args_wrapper
def _invoke_mapping(self, method, *args, **kwargs):
try:
item = self.map[self.bus_name][method]
return getattr(self.bus, item)(*args, **kwargs) if item else None
except KeyError:
raise Exception(
"Unregistered method or attribute {}".format(method))
def _mapping(self):
self.map = {
"UART": {
"close": "deinit",
"flushInput": "",
"flushOutput": ""
},
}
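# A minimal smoke-test sketch, assuming a USB serial adapter at '/dev/ttyUSB0'
# (the port name is illustrative only). connection() catches any failure,
# logs it and returns None, so this is safe to run without the device attached.
if __name__ == '__main__':
    bus = UART().connection('/dev/ttyUSB0')
    if bus:
        # 'close' resolves directly on pyserial; on the pyBoard it is routed
        # through the mapping table above to UART.deinit().
        bus.close()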
| {
"content_hash": "cb7f7495be83d6d85813840df4ba928b",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 77,
"avg_line_length": 27.589285714285715,
"alnum_prop": 0.5372168284789643,
"repo_name": "s-s-boika/obdlib",
"id": "d41fccb7523d2734e1207ea00040f62a02a41e0c",
"size": "1545",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "obdlib/uart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "2796"
},
{
"name": "Python",
"bytes": "92747"
},
{
"name": "Shell",
"bytes": "127"
}
],
"symlink_target": ""
} |
"""Django ORM models for Social Auth"""
import six
from django.db import models
from django.conf import settings
from django.db.utils import IntegrityError
from social.utils import setting_name
from social.storage.django_orm import DjangoUserMixin, \
DjangoAssociationMixin, \
DjangoNonceMixin, \
DjangoCodeMixin, \
BaseDjangoStorage
from social.apps.django_app.default.fields import JSONField
USER_MODEL = getattr(settings, setting_name('USER_MODEL'), None) or \
getattr(settings, 'AUTH_USER_MODEL', None) or \
'auth.User'
UID_LENGTH = getattr(settings, setting_name('UID_LENGTH'), 255)
NONCE_SERVER_URL_LENGTH = getattr(
settings, setting_name('NONCE_SERVER_URL_LENGTH'), 255)
ASSOCIATION_SERVER_URL_LENGTH = getattr(
settings, setting_name('ASSOCIATION_SERVER_URL_LENGTH'), 255)
ASSOCIATION_HANDLE_LENGTH = getattr(
settings, setting_name('ASSOCIATION_HANDLE_LENGTH'), 255)
class UserSocialAuth(models.Model, DjangoUserMixin):
"""Social Auth association model"""
user = models.ForeignKey(USER_MODEL, related_name='social_auth')
provider = models.CharField(max_length=32)
uid = models.CharField(max_length=UID_LENGTH)
extra_data = JSONField()
class Meta:
"""Meta data"""
unique_together = ('provider', 'uid')
db_table = 'social_auth_usersocialauth'
@classmethod
def get_social_auth(cls, provider, uid):
try:
return cls.objects.select_related('user').get(provider=provider,
uid=uid)
except UserSocialAuth.DoesNotExist:
return None
@classmethod
def username_max_length(cls):
username_field = cls.username_field()
field = UserSocialAuth.user_model()._meta.get_field(username_field)
return field.max_length
@classmethod
def user_model(cls):
user_model = UserSocialAuth._meta.get_field('user').rel.to
if isinstance(user_model, six.string_types):
app_label, model_name = user_model.split('.')
return models.get_model(app_label, model_name)
return user_model
class Nonce(models.Model, DjangoNonceMixin):
"""One use numbers"""
server_url = models.CharField(max_length=NONCE_SERVER_URL_LENGTH)
timestamp = models.IntegerField()
salt = models.CharField(max_length=65)
class Meta:
db_table = 'social_auth_nonce'
class Association(models.Model, DjangoAssociationMixin):
"""OpenId account association"""
server_url = models.CharField(max_length=ASSOCIATION_SERVER_URL_LENGTH)
handle = models.CharField(max_length=ASSOCIATION_HANDLE_LENGTH)
secret = models.CharField(max_length=255) # Stored base64 encoded
issued = models.IntegerField()
lifetime = models.IntegerField()
assoc_type = models.CharField(max_length=64)
class Meta:
db_table = 'social_auth_association'
class Code(models.Model, DjangoCodeMixin):
email = models.EmailField()
code = models.CharField(max_length=32, db_index=True)
verified = models.BooleanField(default=False)
class Meta:
db_table = 'social_auth_code'
unique_together = ('email', 'code')
class DjangoStorage(BaseDjangoStorage):
user = UserSocialAuth
nonce = Nonce
association = Association
code = Code
@classmethod
def is_integrity_error(cls, exception):
return exception.__class__ is IntegrityError
| {
"content_hash": "2b988dca993972bb8a97740fda17311a",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 76,
"avg_line_length": 34.2952380952381,
"alnum_prop": 0.6523188003332407,
"repo_name": "duoduo369/python-social-auth",
"id": "cf886b9766839f036cfe30c868a5cef9b3ab4565",
"size": "3601",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "social/apps/django_app/default/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "54"
},
{
"name": "Python",
"bytes": "562718"
},
{
"name": "Shell",
"bytes": "67"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "weibo_admin.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| {
"content_hash": "b9e3301d01593e32dcaedcc9bafcfa41",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 77,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.6056105610561056,
"repo_name": "ResolveWang/WeiboSpider",
"id": "b73b94e2a3c3a8583b5f7c6ed4ad3bc7cc37385e",
"size": "606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "admin/manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2197379"
},
{
"name": "Shell",
"bytes": "623"
}
],
"symlink_target": ""
} |
from flask.ext.wtf import Form
from wtforms import TextField, BooleanField
from wtforms.validators import Required
class LoginForm(Form):
openid = TextField("openid", validators=[Required()])
remember_me = BooleanField("remember_me", default=False)
| {
"content_hash": "387b041f046939ce9a177250cd7e5854",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 60,
"avg_line_length": 32.375,
"alnum_prop": 0.7683397683397684,
"repo_name": "pkulev/c3",
"id": "a6a340aa5f0a4c37623e95df171d07e2217f4c26",
"size": "259",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ewi/api/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1994"
},
{
"name": "Makefile",
"bytes": "5988"
},
{
"name": "Python",
"bytes": "9743"
},
{
"name": "Shell",
"bytes": "6334"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import numpy as np
import traceback
from .param import Variable, Parameterized, graph_key
from .tf_wraps import clip
from ._settings import settings
float_type = settings.dtypes.float_type
np_float_type = np.float32 if float_type is tf.float32 else np.float64
class MatBias(Parameterized):
def __init__(self, nodes, n_layers=[],
mean=0.0, stddev=1.0,
variable = tf.Variable,
collections = [graph_key.VARIABLES]):
"""
A simple class that handles a matrix and bias pair, w*x + b
input:
- nodes: a 2-element list or tuple of integers giving the number of inputs and
outputs for the matrix
"""
assert(len(nodes)==2)
Parameterized.__init__(self)
# --- define matrices and biases ---
self.w = variable(shape=[nodes[0], nodes[1]], n_layers=n_layers,
mean=mean, stddev=stddev,
collections=collections)
self.b = variable(shape=[1,nodes[1]], n_layers=n_layers,
mean=mean, stddev=stddev,
collections=collections)
def __call__(self, x):
return clip(tf.matmul(x, self.w) + self.b)
class NeuralNet(Parameterized):
def __init__(self, nodes, n_layers = [],
mean=0.0, stddev=1.0,
variable_types = Variable,
neuron_types = tf.sigmoid,
collections = [graph_key.VARIABLES]):
"""
nodes: list of node counts for each layer of the neural net.
n_layers: number of layers.
variable_types: a single Variable class or a list of them, one of
[tf.Variable or Variational]
neuron_types: a single activation function or a list of them.
name: name of this neural net
"""
Parameterized.__init__(self)
self.nodes = nodes
# --- variable types ----
if not isinstance(variable_types, list):
variable_types = [variable_types for _ in range(len(nodes)-1)]
else:
variable_types = variable_types
# --- neuron types ----
if not isinstance(neuron_types, list):
self.neuron_types = [neuron_types for _ in range(len(nodes)-2)]
else:
self.neuron_types = neuron_types
# --- define matrices and biases ---
self._matbias_list = []
for i in range(len(nodes)-1):
matbias = MatBias(nodes=[nodes[i], nodes[i+1]],
n_layers=n_layers,
mean=mean, stddev=stddev,
variable = variable_types[i],
collections=collections)
self._matbias_list.append(matbias)
key = 'matbias' + str(i)
name_matbias = self.name + str('.matbias')
setattr(self, key, matbias)
def __call__(self, x):
"""
x: tf.Tensor
Returns the op for this neural net.
This method should be executed in tf_mode.
"""
y = x
for i in range(len(self.nodes)-2):
typ = self.neuron_types[i]
name_nn = None if self.name is None else self.name + str('.nn')+str(i)
y = typ(self._matbias_list[i](y), name=name_nn)
return self._matbias_list[-1](y)
def __getitem__(self, i):
return self._matbias_list[i]
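# A hedged usage sketch; the layer sizes below are illustrative assumptions,
# and the lines are left as comments because __call__ is documented to
# require the surrounding tf_mode machinery, which is outside this file.
#
#   net = NeuralNet(nodes=[3, 10, 1])   # 3 inputs -> 10 hidden -> 1 output
#   y_op = net(x)                       # inside tf_mode, x: [batch, 3] tensor
#   first_layer = net[0]                # the MatBias (w, b) pair for layer 0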
| {
"content_hash": "ed7e0416c303343b294d0c3a78e88c2a",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 82,
"avg_line_length": 39.5632183908046,
"alnum_prop": 0.530796048808832,
"repo_name": "fujii-team/Henbun",
"id": "37b94b00181766fb384062e9f30748c22ba7007a",
"size": "3442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Henbun/nn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "46445"
},
{
"name": "Python",
"bytes": "163970"
}
],
"symlink_target": ""
} |
from __future__ import division
from __future__ import absolute_import
from __future__ import with_statement
from __future__ import print_function
from __future__ import unicode_literals
from flask import Module, request, Response, jsonify
from babel import localedata
from stutuz.extensions import db
mod = Module(__name__)
def jsonerror(msg, code):
return Response(jsonify(error=msg).data,
mimetype='application/json',
status=code)
@mod.route('/entry/')
def entry():
if not request.args.get('id'):
return jsonerror("Missing required argument 'id'.", 400)
if request.args['id'] not in db['entries']:
return jsonerror('Undefined entry.', 404)
if 'locale' in request.args and \
not localedata.exists(request.args['locale']):
return jsonerror('Invalid locale.', 400)
entry = db['entries'][request.args['id']]
data = dict(id=entry.id, type=entry.type)
if hasattr(entry, 'affixes'):
data['affixes'] = [affix.encode('utf-8') for affix in entry.affixes]
if hasattr(entry, 'class_'):
data['class'] = entry.class_
locale = request.args.get('locale')
if entry.history(locale):
definition = entry.history(locale).newest.object
data['definition'] = definition.definition
data['notes'] = definition.notes
return jsonify(data)
| {
"content_hash": "957da27a5fe0cb03bf4c65c1edda7c10",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 76,
"avg_line_length": 31.34090909090909,
"alnum_prop": 0.6519216823785352,
"repo_name": "dag/stutuz",
"id": "077877f4adf0ba9f6f7bcf80e77fab76e82d90ee",
"size": "1402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "relvlast/api.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "54577"
}
],
"symlink_target": ""
} |
selected = {}
for l in open("survey.txt", 'r'):
teams = frozenset(l.split())
if teams in selected:
selected[teams] += 1
else:
selected[teams] = 1
notSelected = {"Bel", "Eng", "Ger", "Fra", "Ita", "Spa", "Cam"}
for teams in selected.keys():
notSelected = notSelected.difference(teams)
print("Teams not selected: " + ", ".join(notSelected))
both = 0
for teams, i in selected.items():
if "Bel" in teams and "Ger" in teams:
both += i
print("Fans who like both Bel and Ger: %d" % both)
print("Pair of fans who have the same favorite teams: %d" % len(selected))
| {
"content_hash": "401b091b25852dc8297a8373ff18324f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 74,
"avg_line_length": 35.294117647058826,
"alnum_prop": 0.6183333333333333,
"repo_name": "JoachimVandersmissen/CodingSolutions",
"id": "755b7ee4ab130335011c7f29ca8bea249b82c996",
"size": "600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/PythonForEveryone/session6/4.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "48603"
},
{
"name": "Go",
"bytes": "7477"
},
{
"name": "Java",
"bytes": "39690"
},
{
"name": "Objective-C",
"bytes": "926"
},
{
"name": "Prolog",
"bytes": "7245"
},
{
"name": "Python",
"bytes": "69395"
},
{
"name": "TeX",
"bytes": "22704"
},
{
"name": "Visual Basic",
"bytes": "3916"
}
],
"symlink_target": ""
} |
import unittest
import IECore
import Gaffer
import GafferImage
import GafferImageTest
class ConstantTest( GafferImageTest.ImageTestCase ) :
def testChannelData( self ) :
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 511 ) ), 1 ) )
constant["color"].setValue( IECore.Color4f( 0, 0.25, 0.5, 1 ) )
for i, channel in enumerate( [ "R", "G", "B", "A" ] ) :
channelData = constant["out"].channelData( channel, IECore.V2i( 0 ) )
self.assertEqual( len( channelData ), constant["out"].tileSize() * constant["out"].tileSize() )
expectedValue = constant["color"][i].getValue()
for value in channelData :
self.assertEqual( value, expectedValue )
def testChannelDataHash( self ) :
# The hash for each individual channel should only
# be affected by that particular channel of the colour plug.
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 511 ) ), 1 ) )
constant["color"].setValue( IECore.Color4f( 0 ) )
channels = [ "R", "G", "B", "A" ]
for i, channel in enumerate( channels ) :
h1 = [ constant["out"].channelDataHash( c, IECore.V2i( 0 ) ) for c in channels ]
constant["color"][i].setValue( constant["color"][i].getValue() + .1 )
h2 = [ constant["out"].channelDataHash( c, IECore.V2i( 0 ) ) for c in channels ]
for j in range( 0, len( channels ) ) :
if j == i :
self.assertNotEqual( h1[j], h2[j] )
else :
self.assertEqual( h1[j], h2[j] )
def testFormatHash( self ) :
# Check that the data hash doesn't change when the format does.
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 2048, 1156, 1. ) )
h1 = c["out"].channelData( "R", IECore.V2i( 0 ) ).hash()
c["format"].setValue( GafferImage.Format( 1920, 1080, 1. ) )
h2 = c["out"].channelData( "R", IECore.V2i( 0 ) ).hash()
self.assertEqual( h1, h2 )
def testTileHashes( self ) :
# Test that two tiles within the image have the same hash.
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 2048, 1156, 1. ) )
c["color"][0].setValue( .5 )
self.assertEqual(
c["out"].channelDataHash( "R", IECore.V2i( 0 ) ),
c["out"].channelDataHash( "R", IECore.V2i( GafferImage.ImagePlug().tileSize() ) ),
)
def testTileIdentity( self ) :
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 2048, 1156, 1. ) )
# The channelData() binding returns a copy by default, so we wouldn't
# expect two tiles to be referencing the same object.
self.assertFalse(
c["out"].channelData( "R", IECore.V2i( 0 ) ).isSame(
c["out"].channelData( "R", IECore.V2i( GafferImage.ImagePlug.tileSize() ) )
)
)
# But behind the scenes we do want them to be the same, so
# check that that is the case.
self.assertTrue(
c["out"].channelData( "R", IECore.V2i( 0 ), _copy = False ).isSame(
c["out"].channelData( "R", IECore.V2i( GafferImage.ImagePlug.tileSize() ), _copy = False )
)
)
def testEnableBehaviour( self ) :
c = GafferImage.Constant()
self.assertTrue( c.enabledPlug().isSame( c["enabled"] ) )
self.assertEqual( c.correspondingInput( c["out"] ), None )
self.assertEqual( c.correspondingInput( c["color"] ), None )
self.assertEqual( c.correspondingInput( c["format"] ), None )
def testChannelNamesHash( self ) :
c = GafferImage.Constant()
h1 = c["out"]["channelNames"].hash()
c["color"].setValue( IECore.Color4f( 1, 0.5, 0.25, 1 ) )
h2 = c["out"]["channelNames"].hash()
self.assertEqual( h1, h2 )
def testSerialisationWithZeroAlpha( self ) :
s = Gaffer.ScriptNode()
s["c"] = GafferImage.Constant()
s["c"]["color"].setValue( IECore.Color4f( 0, 1, 0, 0 ) )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( s2["c"]["color"].getValue(), IECore.Color4f( 0, 1, 0, 0 ) )
def testFormatDependencies( self ) :
c = GafferImage.Constant()
self.assertEqual(
c.affects( c["format"]["displayWindow"]["min"]["x"] ),
[ c["out"]["format"], c["out"]["dataWindow"] ],
)
self.assertEqual(
c.affects( c["format"]["pixelAspect"] ),
[ c["out"]["format"] ],
)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "440e917ad3ed9640803abd307be0cda0",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 108,
"avg_line_length": 32.465648854961835,
"alnum_prop": 0.6447213731483659,
"repo_name": "chippey/gaffer",
"id": "d93a3301a35663264b31a9930568ca7f073837b5",
"size": "6113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferImageTest/ConstantTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2258"
},
{
"name": "C++",
"bytes": "5420141"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "GLSL",
"bytes": "6250"
},
{
"name": "Objective-C",
"bytes": "2228"
},
{
"name": "Python",
"bytes": "5348174"
},
{
"name": "Shell",
"bytes": "8370"
},
{
"name": "Slash",
"bytes": "41159"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from six import text_type
from typing import Any, Dict, List, Tuple, Optional, Sequence, Callable, Union, Set
from django.db import connection
from django.db.models.query import QuerySet
from django.template import RequestContext, loader
from django.core import urlresolvers
from django.http import HttpResponseNotFound, HttpRequest, HttpResponse
from jinja2 import Markup as mark_safe
from zerver.decorator import has_request_variables, REQ, zulip_internal
from zerver.models import get_realm, UserActivity, UserActivityInterval, Realm
from zerver.lib.timestamp import timestamp_to_datetime
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import time
import re
import pytz
from six.moves import filter
from six.moves import map
from six.moves import range
from six.moves import zip
eastern_tz = pytz.timezone('US/Eastern')
from zproject.jinja2 import render_to_response
def make_table(title, cols, rows, has_row_class=False):
# type: (str, List[str], List[Any], bool) -> str
if not has_row_class:
def fix_row(row):
# type: (Any) -> Dict[str, Any]
return dict(cells=row, row_class=None)
rows = list(map(fix_row, rows))
data = dict(title=title, cols=cols, rows=rows)
content = loader.render_to_string(
'analytics/ad_hoc_query.html',
dict(data=data)
)
return content
def dictfetchall(cursor):
# type: (connection.cursor) -> List[Dict[str, Any]]
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(list(zip([col[0] for col in desc], row)))
for row in cursor.fetchall()
]
def get_realm_day_counts():
# type: () -> Dict[str, Dict[str, str]]
query = '''
select
r.domain,
(now()::date - pub_date::date) age,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
join zerver_client c on c.id = m.sending_client_id
where
(not up.is_bot)
and
pub_date > now()::date - interval '8 day'
and
c.name not in ('zephyr_mirror', 'ZulipMonitoring')
group by
r.domain,
age
order by
r.domain,
age
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
counts = defaultdict(dict) # type: Dict[str, Dict[int, int]]
for row in rows:
counts[row['domain']][row['age']] = row['cnt']
result = {}
for domain in counts:
raw_cnts = [counts[domain].get(age, 0) for age in range(8)]
min_cnt = min(raw_cnts)
max_cnt = max(raw_cnts)
def format_count(cnt):
# type: (int) -> str
if cnt == min_cnt:
good_bad = 'bad'
elif cnt == max_cnt:
good_bad = 'good'
else:
good_bad = 'neutral'
return '<td class="number %s">%s</td>' % (good_bad, cnt)
cnts = ''.join(map(format_count, raw_cnts))
result[domain] = dict(cnts=cnts)
return result
def realm_summary_table(realm_minutes):
# type: (Dict[str, float]) -> str
query = '''
SELECT
realm.domain,
coalesce(user_counts.active_user_count, 0) active_user_count,
coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND not is_bot
) user_profile_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND is_bot
) bot_count
FROM zerver_realm realm
LEFT OUTER JOIN
(
SELECT
up.realm_id realm_id,
count(distinct(ua.user_profile_id)) active_user_count
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
last_visit > now() - interval '1 day'
AND
not is_bot
GROUP BY realm_id
) user_counts
ON user_counts.realm_id = realm.id
LEFT OUTER JOIN
(
SELECT
realm_id,
count(*) at_risk_count
FROM (
SELECT
realm.id as realm_id,
up.email
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
JOIN zerver_realm realm
ON realm.id = up.realm_id
WHERE up.is_active
AND (not up.is_bot)
AND
ua.query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
GROUP by realm.id, up.email
HAVING max(last_visit) between
now() - interval '7 day' and
now() - interval '1 day'
) as at_risk_users
GROUP BY realm_id
) at_risk_counts
ON at_risk_counts.realm_id = realm.id
WHERE EXISTS (
SELECT *
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'/api/v1/send_message',
'send_message_backend',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
up.realm_id = realm.id
AND
last_visit > now() - interval '2 week'
)
ORDER BY active_user_count DESC, domain ASC
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
# get messages sent per day
counts = get_realm_day_counts()
for row in rows:
try:
row['history'] = counts[row['domain']]['cnts']
except:
row['history'] = ''
# augment data with realm_minutes
total_hours = 0.0
for row in rows:
domain = row['domain']
minutes = realm_minutes.get(domain, 0.0)
hours = minutes / 60.0
total_hours += hours
row['hours'] = str(int(hours))
try:
row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
except:
pass
# formatting
for row in rows:
row['domain'] = realm_activity_link(row['domain'])
# Count active sites
def meets_goal(row):
# type: (Dict[str, int]) -> bool
return row['active_user_count'] >= 5
num_active_sites = len(list(filter(meets_goal, rows)))
# create totals
total_active_user_count = 0
total_user_profile_count = 0
total_bot_count = 0
total_at_risk_count = 0
for row in rows:
total_active_user_count += int(row['active_user_count'])
total_user_profile_count += int(row['user_profile_count'])
total_bot_count += int(row['bot_count'])
total_at_risk_count += int(row['at_risk_count'])
rows.append(dict(
domain='Total',
active_user_count=total_active_user_count,
user_profile_count=total_user_profile_count,
bot_count=total_bot_count,
hours=int(total_hours),
at_risk_count=total_at_risk_count,
))
content = loader.render_to_string(
'analytics/realm_summary_table.html',
dict(rows=rows, num_active_sites=num_active_sites)
)
return content
def user_activity_intervals():
# type: () -> Tuple[mark_safe, Dict[str, float]]
day_end = timestamp_to_datetime(time.time())
day_start = day_end - timedelta(hours=24)
output = "Per-user online duration for the last 24 hours:\n"
total_duration = timedelta(0)
all_intervals = UserActivityInterval.objects.filter(
end__gte=day_start,
start__lte=day_end
).select_related(
'user_profile',
'user_profile__realm'
).only(
'start',
'end',
'user_profile__email',
'user_profile__realm__domain'
).order_by(
'user_profile__realm__domain',
'user_profile__email'
)
by_domain = lambda row: row.user_profile.realm.domain
by_email = lambda row: row.user_profile.email
realm_minutes = {}
for domain, realm_intervals in itertools.groupby(all_intervals, by_domain):
realm_duration = timedelta(0)
output += '<hr>%s\n' % (domain,)
for email, intervals in itertools.groupby(realm_intervals, by_email):
duration = timedelta(0)
for interval in intervals:
start = max(day_start, interval.start)
end = min(day_end, interval.end)
duration += end - start
total_duration += duration
realm_duration += duration
output += " %-*s%s\n" % (37, email, duration)
realm_minutes[domain] = realm_duration.total_seconds() / 60
output += "\nTotal Duration: %s\n" % (total_duration,)
output += "\nTotal Duration in minutes: %s\n" % (total_duration.total_seconds() / 60.,)
output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
content = mark_safe('<pre>' + output + '</pre>')
return content, realm_minutes
def sent_messages_report(realm):
# type: (str) -> str
title = 'Recently sent messages for ' + realm
cols = [
'Date',
'Humans',
'Bots'
]
query = '''
select
series.day::date,
humans.cnt,
bots.cnt
from (
select generate_series(
(now()::date - interval '2 week'),
now()::date,
interval '1 day'
) as day
) as series
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.domain = %s
and
(not up.is_bot)
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) humans on
series.day = humans.pub_date
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.domain = %s
and
up.is_bot
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) bots on
series.day = bots.pub_date
'''
cursor = connection.cursor()
cursor.execute(query, [realm, realm])
rows = cursor.fetchall()
cursor.close()
return make_table(title, cols, rows)
def ad_hoc_queries():
# type: () -> List[Dict[str, str]]
def get_page(query, cols, title):
# type: (str, List[str], str) -> Dict[str, str]
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
rows = list(map(list, rows))
cursor.close()
def fix_rows(i, fixup_func):
# type: (int, Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]) -> None
for row in rows:
row[i] = fixup_func(row[i])
for i, col in enumerate(cols):
if col == 'Domain':
fix_rows(i, realm_activity_link)
elif col in ['Last time', 'Last visit']:
fix_rows(i, format_date_for_activity_reports)
content = make_table(title, cols, rows)
return dict(
content=content,
title=title
)
pages = []
###
for mobile_type in ['Android', 'ZulipiOS']:
title = '%s usage' % (mobile_type,)
query = '''
select
realm.domain,
up.id user_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like '%s'
group by domain, up.id, client.name
having max(last_visit) > now() - interval '2 week'
order by domain, up.id, client.name
''' % (mobile_type,)
cols = [
'Domain',
'User id',
'Name',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Desktop users'
query = '''
select
realm.domain,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like 'desktop%%'
group by domain, client.name
having max(last_visit) > now() - interval '2 week'
order by domain, client.name
'''
cols = [
'Domain',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by domain'
query = '''
select
realm.domain,
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by domain, client_name
having max(last_visit) > now() - interval '2 week'
order by domain, client_name
'''
cols = [
'Domain',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by client'
query = '''
select
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
realm.domain,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by client_name, domain
having max(last_visit) > now() - interval '2 week'
order by client_name, domain
'''
cols = [
'Client',
'Domain',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
return pages
@zulip_internal
@has_request_variables
def get_activity(request):
# type: (HttpRequest) -> HttpResponse
duration_content, realm_minutes = user_activity_intervals() # type: Tuple[mark_safe, Dict[str, float]]
counts_content = realm_summary_table(realm_minutes) # type: str
data = [
('Counts', counts_content),
('Durations', duration_content),
]
for page in ad_hoc_queries():
data.append((page['title'], page['content']))
title = 'Activity'
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title, is_home=True),
request=request
)
def get_user_activity_records_for_realm(realm, is_bot):
# type: (str, bool) -> QuerySet
fields = [
'user_profile__full_name',
'user_profile__email',
'query',
'client__name',
'count',
'last_visit',
]
records = UserActivity.objects.filter(
user_profile__realm__domain=realm,
user_profile__is_active=True,
user_profile__is_bot=is_bot
)
records = records.order_by("user_profile__email", "-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def get_user_activity_records_for_email(email):
# type: (str) -> List[QuerySet]
fields = [
'user_profile__full_name',
'query',
'client__name',
'count',
'last_visit'
]
records = UserActivity.objects.filter(
user_profile__email=email
)
records = records.order_by("-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def raw_user_activity_table(records):
# type: (List[QuerySet]) -> str
cols = [
'query',
'client',
'count',
'last_visit'
]
def row(record):
# type: (QuerySet) -> List[Any]
return [
record.query,
record.client.name,
record.count,
format_date_for_activity_reports(record.last_visit)
]
rows = list(map(row, records))
title = 'Raw Data'
return make_table(title, cols, rows)
def get_user_activity_summary(records):
# type: (List[QuerySet]) -> Dict[str, Dict[str, Any]]
#: `Any` used above should be `Union[int, datetime]`.
#: However, the current version of `Union` does not work inside another function.
#: We could use something like:
# `Union[Dict[str, Dict[str, int]], Dict[str, Dict[str, datetime]]]`
#: but that would require this long `Union` to carry on throughout inner functions.
summary = {} # type: Dict[str, Dict[str, Any]]
def update(action, record):
# type: (str, QuerySet) -> None
if action not in summary:
summary[action] = dict(
count=record.count,
last_visit=record.last_visit
)
else:
summary[action]['count'] += record.count
summary[action]['last_visit'] = max(
summary[action]['last_visit'],
record.last_visit
)
if records:
summary['name'] = records[0].user_profile.full_name
for record in records:
client = record.client.name
query = record.query
update('use', record)
if client == 'API':
m = re.match('/api/.*/external/(.*)', query)
if m:
client = m.group(1)
update(client, record)
if client.startswith('desktop'):
update('desktop', record)
if client == 'website':
update('website', record)
if ('send_message' in query) or re.search('/api/.*/external/.*', query):
update('send', record)
if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer']:
update('pointer', record)
update(client, record)
return summary
def format_date_for_activity_reports(date):
# type: (Optional[datetime]) -> str
if date:
return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
else:
return ''
def user_activity_link(email):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_user_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(email=email))
email_link = '<a href="%s">%s</a>' % (url, email)
return mark_safe(email_link)
def realm_activity_link(realm):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_realm_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(realm=realm))
realm_link = '<a href="%s">%s</a>' % (url, realm)
return mark_safe(realm_link)
def realm_client_table(user_summaries):
# type: (Dict[str, Dict[str, Dict[str, Any]]]) -> str
exclude_keys = [
'internal',
'name',
'use',
'send',
'pointer',
'website',
'desktop',
]
rows = []
for email, user_summary in user_summaries.items():
email_link = user_activity_link(email)
name = user_summary['name']
for k, v in user_summary.items():
if k in exclude_keys:
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
name,
email_link,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'Last visit',
'Client',
'Name',
'Email',
'Count',
]
title = 'Clients'
return make_table(title, cols, rows)
def user_activity_summary_table(user_summary):
# type: (Dict[str, Dict[str, Any]]) -> str
rows = []
for k, v in user_summary.items():
if k == 'name':
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'last_visit',
'client',
'count',
]
title = 'User Activity'
return make_table(title, cols, rows)
def realm_user_summary_table(all_records, admin_emails):
# type: (List[QuerySet], Set[text_type]) -> Tuple[Dict[str, Dict[str, Any]], str]
user_records = {}
def by_email(record):
# type: (QuerySet) -> str
return record.user_profile.email
for email, records in itertools.groupby(all_records, by_email):
user_records[email] = get_user_activity_summary(list(records))
def get_last_visit(user_summary, k):
# type: (Dict[str, Dict[str, datetime]], str) -> Optional[datetime]
if k in user_summary:
return user_summary[k]['last_visit']
else:
return None
def get_count(user_summary, k):
# type: (Dict[str, Dict[str, str]], str) -> str
if k in user_summary:
return user_summary[k]['count']
else:
return ''
def is_recent(val):
# type: (Optional[datetime]) -> bool
age = datetime.now(val.tzinfo) - val # type: ignore # datetime.now tzinfo bug.
return age.total_seconds() < 5 * 60
rows = []
for email, user_summary in user_records.items():
email_link = user_activity_link(email)
sent_count = get_count(user_summary, 'send')
cells = [user_summary['name'], email_link, sent_count]
row_class = ''
for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
visit = get_last_visit(user_summary, field)
if field == 'use':
if visit and is_recent(visit):
row_class += ' recently_active'
if email in admin_emails:
row_class += ' admin'
val = format_date_for_activity_reports(visit)
cells.append(val)
row = dict(cells=cells, row_class=row_class)
rows.append(row)
def by_used_time(row):
# type: (Dict[str, Sequence[str]]) -> str
return row['cells'][3]
rows = sorted(rows, key=by_used_time, reverse=True)
cols = [
'Name',
'Email',
'Total sent',
'Heard from',
'Message sent',
'Pointer motion',
'Desktop',
'ZulipiOS',
'Android'
]
title = 'Summary'
content = make_table(title, cols, rows, has_row_class=True)
return user_records, content
@zulip_internal
def get_realm_activity(request, realm):
# type: (HttpRequest, str) -> HttpResponse
data = [] # type: List[Tuple[str, str]]
all_user_records = {} # type: Dict[str, Any]
try:
admins = get_realm(realm).get_admin_users()
except Realm.DoesNotExist:
return HttpResponseNotFound("Realm %s does not exist" % (realm,))
admin_emails = {admin.email for admin in admins}
for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
all_records = list(get_user_activity_records_for_realm(realm, is_bot))
user_records, content = realm_user_summary_table(all_records, admin_emails)
all_user_records.update(user_records)
data += [(page_title, content)]
page_title = 'Clients'
content = realm_client_table(all_user_records)
data += [(page_title, content)]
page_title = 'History'
content = sent_messages_report(realm)
data += [(page_title, content)]
fix_name = lambda realm: realm.replace('.', '_')
realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (fix_name(realm),)
title = realm
return render_to_response(
'analytics/activity.html',
dict(data=data, realm_link=realm_link, title=title),
request=request
)
@zulip_internal
def get_user_activity(request, email):
# type: (HttpRequest, str) -> HttpResponse
records = get_user_activity_records_for_email(email)
data = [] # type: List[Tuple[str, str]]
user_summary = get_user_activity_summary(records)
content = user_activity_summary_table(user_summary)
data += [('Summary', content)]
content = raw_user_activity_table(records)
data += [('Info', content)]
title = email
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title),
request=request
)
| {
"content_hash": "3971eee47884fee60e01560184e9acbd",
"timestamp": "",
"source": "github",
"line_count": 925,
"max_line_length": 106,
"avg_line_length": 30.393513513513515,
"alnum_prop": 0.5269616561143914,
"repo_name": "umkay/zulip",
"id": "3f48f187ec24d0b10a7067386a7d3bdd6906b85e",
"size": "28114",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "analytics/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "231487"
},
{
"name": "Groovy",
"bytes": "5516"
},
{
"name": "HTML",
"bytes": "440781"
},
{
"name": "JavaScript",
"bytes": "1411291"
},
{
"name": "Nginx",
"bytes": "1229"
},
{
"name": "PHP",
"bytes": "18929"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86728"
},
{
"name": "Python",
"bytes": "2720293"
},
{
"name": "Ruby",
"bytes": "249738"
},
{
"name": "Shell",
"bytes": "34425"
}
],
"symlink_target": ""
} |
import numpy as np
from capstone.datasets.ucic4 import get_random_win_game, get_random_loss_game
from capstone.game.players.kerasplayer import KerasStatePlayer
from capstone.game.players import RandPlayer
from capstone.game.utils import play_match, play_series
from capstone.utils import print_aec, str_aec
keras = KerasStatePlayer('models/episode-14500-winpct-0.942')
rnd = RandPlayer()
N_EVALUATIONS = 100
N_MATCHES_PER_EVALUATION = 100
def run_evaluation(generator, players, expected):
'''
Returns the accuracy of the prediction.
'''
print 'Running experiment for %s' % expected
outcomes = []
for i in range(N_EVALUATIONS):
print 'Episode %d' % i
results = play_series(
game=generator(),
players=players,
n_matches=N_MATCHES_PER_EVALUATION,
verbose=False
)
outcomes.append(results[expected] / float(N_MATCHES_PER_EVALUATION))
return np.mean(outcomes)
print run_evaluation(
generator=get_random_win_game,
players = [keras, keras],
# players = [rnd, rnd],
expected='W'
)
# print run_evaluation(
# generator=get_random_loss_game,
# players = [keras, keras],
# expected='L'
# )
| {
"content_hash": "6d0303c2d21d4d551a25f7c1579a4861",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 77,
"avg_line_length": 27.704545454545453,
"alnum_prop": 0.6735028712059065,
"repo_name": "davidrobles/mlnd-capstone-code",
"id": "9c22cb5036a6f44120b730d9fe66be81c914d06e",
"size": "1219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/05_exp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "266"
},
{
"name": "Python",
"bytes": "150850"
}
],
"symlink_target": ""
} |
'''Mike's wrapper for the visualizer???'''
from contextlib import contextmanager
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL import *
import OpenGL
__all__ = '''
gl
glu
glut
'''.strip().split()
class ModuleProxy(object):
def __init__(self, name, module):
self.name = name
self.module = module
def __getattr__(self, name):
if name.isupper():
return getattr(self.module, self.name.upper() + '_' + name)
else:
# convert to camel case
name = name.split('_')
name = [x[0].upper() + x[1:] for x in name]
name = ''.join(name)
return getattr(self.module, self.name + name)
class GLProxy(ModuleProxy):
@contextmanager
def matrix(self):
self.module.glPushMatrix()
try:
yield
finally:
self.module.glPopMatrix()
@contextmanager
def attrib(self, *args):
mask = 0
for arg in args:
if isinstance(arg, basestring):
arg = getattr(self.module, 'GL_%s_BIT' % arg.upper())
mask |= arg
self.module.glPushAttrib(mask)
try:
yield
finally:
self.module.glPopAttrib()
def enable(self, *args, **kwargs):
self._enable(True, args, kwargs)
return self._apply_on_exit(self._enable, False, args, kwargs)
def disable(self, *args, **kwargs):
self._enable(False, args, kwargs)
return self._apply_on_exit(self._enable, True, args, kwargs)
def _enable(self, enable, args, kwargs):
todo = []
for arg in args:
if isinstance(arg, basestring):
arg = getattr(self.module, 'GL_%s' % arg.upper())
todo.append((arg, enable))
for key, value in kwargs.iteritems():
flag = getattr(self.module, 'GL_%s' % key.upper())
value = value if enable else not value
todo.append((flag, value))
for flag, value in todo:
if value:
self.module.glEnable(flag)
else:
self.module.glDisable(flag)
def begin(self, arg):
if isinstance(arg, basestring):
arg = getattr(self.module, 'GL_%s' % arg.upper())
self.module.glBegin(arg)
return self._apply_on_exit(self.module.glEnd)
@contextmanager
def _apply_on_exit(self, func, *args, **kwargs):
try:
yield
finally:
func(*args, **kwargs)
gl = GLProxy('gl', OpenGL.GL)
glu = ModuleProxy('glu', OpenGL.GLU)
glut = ModuleProxy('glut', OpenGL.GLUT)
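# A minimal sketch of the name mangling done by ModuleProxy.__getattr__, run
# against a hypothetical stand-in module so no OpenGL context is needed.
# _FakeGL and its members exist only for illustration.
if __name__ == '__main__':
    class _FakeGL(object):
        GL_COLOR_BUFFER_BIT = 0x4000
        @staticmethod
        def glClearColor(r, g, b, a):
            return ('glClearColor', r, g, b, a)
    _demo = ModuleProxy('gl', _FakeGL)
    # upper-case attributes gain the 'GL_' prefix ...
    assert _demo.COLOR_BUFFER_BIT == 0x4000
    # ... and snake_case attributes are camel-cased onto the 'gl' prefix.
    assert _demo.clear_color(0, 0, 0, 1) == ('glClearColor', 0, 0, 0, 1)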
| {
"content_hash": "552350a1086a4c9335782165397e41e8",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 71,
"avg_line_length": 27.288659793814432,
"alnum_prop": 0.5477899508877975,
"repo_name": "pupil-labs/PyAV",
"id": "8c755b2b98bad61fea5e30959ebe0d82fe56a416",
"size": "2647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/glproxy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "177"
},
{
"name": "C",
"bytes": "4510"
},
{
"name": "C++",
"bytes": "1923"
},
{
"name": "Makefile",
"bytes": "1753"
},
{
"name": "Python",
"bytes": "372365"
},
{
"name": "Shell",
"bytes": "6455"
}
],
"symlink_target": ""
} |
"""
Pynba
~~~~~
:copyright: (c) 2015 by Xavier Barbosa.
:license: MIT, see LICENSE for more details.
"""
from __future__ import absolute_import, unicode_literals
from .local import LOCAL_STACK
from functools import wraps
from pynba.core import logger
__all__ = ['LocalProxy']
class LocalProxy(object):
def __init__(self, **defaults):
object.__setattr__(self, 'defaults', defaults)
def timer(self, **tags):
pynba = LOCAL_STACK.pynba
if pynba:
return pynba.timer(**tags)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
timer = LOCAL_STACK.pynba.timer(**tags)
except (TypeError, AttributeError):
raise RuntimeError('working outside of request context')
with timer:
response = func(*args, **kwargs)
return response
return wrapper
return decorator
def __getattr__(self, name):
try:
return getattr(LOCAL_STACK.pynba, name)
except (TypeError, AttributeError):
if name in self.defaults:
value = self.defaults[name]
logger.warn('working outside of request context '
'render %s with %s', name, value)
return value
raise RuntimeError('working outside of request context')
def __setattr__(self, name, value):
try:
setattr(LOCAL_STACK.pynba, name, value)
except TypeError:
raise RuntimeError('working outside of request context')
def __delattr__(self, name):
try:
delattr(LOCAL_STACK.pynba, name)
except TypeError:
raise RuntimeError('working outside of request context')
| {
"content_hash": "9bfb0df3fad218acc119d9997c427b6e",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 76,
"avg_line_length": 29.349206349206348,
"alnum_prop": 0.5581395348837209,
"repo_name": "johnnoone/pynba",
"id": "34fd5f7392363f5d3c165050fc39bfa31a67afb0",
"size": "1849",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pynba/stacked/globals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Protocol Buffer",
"bytes": "843"
},
{
"name": "Python",
"bytes": "74265"
}
],
"symlink_target": ""
} |
"""habersitesi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from haber.views import (index, haber_detail, category_detail, register_user, login_user, logout_user, IletisimForm )
urlpatterns = [
url(r'^$', index, name='home'),
url(r'^category/(?P<category_id>[0-9]+)/$', category_detail, name='ctgry_detail'),
url(r'^haber/(?P<haber_id>[0-9]+)/$', haber_detail, name='hbr_detail'),
url(r'^admin/', include(admin.site.urls)),
url(r'^register$', register_user, name='register'),
url(r'^iletisim', IletisimForm, name='iletisim'),
url(r'^login', login_user, name='login'),
url(r'^logout', logout_user, name='logout'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| {
"content_hash": "c1495c80466bb0caa7217f39f8620296",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 117,
"avg_line_length": 44.5625,
"alnum_prop": 0.6914446002805049,
"repo_name": "hirmak/habersitesi",
"id": "b8ed7d71bf191a7510c4348caa093d47c37eb50b",
"size": "1426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "habersitesi/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2219"
},
{
"name": "HTML",
"bytes": "3182"
},
{
"name": "Python",
"bytes": "14420"
}
],
"symlink_target": ""
} |
"""
__find_two_match_elements_same_type_diff_rules_MDL.py_____________________________________________________
Automatically generated AToM3 Model File (Do not modify directly)
Author: levi
Modified: Thu Aug 22 11:40:12 2013
__________________________________________________________________________________________________________
"""
from stickylink import *
from widthXfillXdecoration import *
from MT_pre__MatchModel import *
from MT_pre__MetaModelElement_S import *
from MT_pre__match_contains import *
from LHS import *
from graph_MT_pre__MetaModelElement_S import *
from graph_MT_pre__match_contains import *
from graph_MT_pre__MatchModel import *
from graph_LHS import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Action import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
def find_two_match_elements_same_type_diff_rules_MDL(self, rootNode, MT_pre__GM2AUTOSAR_MMRootNode=None, MoTifRuleRootNode=None):
# --- Generating attributes code for ASG MT_pre__GM2AUTOSAR_MM ---
if( MT_pre__GM2AUTOSAR_MMRootNode ):
# author
MT_pre__GM2AUTOSAR_MMRootNode.author.setValue('Annonymous')
# description
MT_pre__GM2AUTOSAR_MMRootNode.description.setValue('\n')
MT_pre__GM2AUTOSAR_MMRootNode.description.setHeight(15)
# name
MT_pre__GM2AUTOSAR_MMRootNode.name.setValue('')
MT_pre__GM2AUTOSAR_MMRootNode.name.setNone()
# --- ASG attributes over ---
# --- Generating attributes code for ASG MoTifRule ---
if( MoTifRuleRootNode ):
# author
MoTifRuleRootNode.author.setValue('Annonymous')
# description
MoTifRuleRootNode.description.setValue('\n')
MoTifRuleRootNode.description.setHeight(15)
# name
MoTifRuleRootNode.name.setValue('FindTwoMatchElementsSameTypeDiffRules')
# --- ASG attributes over ---
self.obj71=MT_pre__MatchModel(self)
self.obj71.isGraphObjectVisual = True
if(hasattr(self.obj71, '_setHierarchicalLink')):
self.obj71._setHierarchicalLink(False)
# MT_label__
self.obj71.MT_label__.setValue('3')
# MT_pivotOut__
self.obj71.MT_pivotOut__.setValue('')
self.obj71.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj71.MT_subtypeMatching__.setValue(('True', 0))
self.obj71.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj71.MT_pivotIn__.setValue('')
self.obj71.MT_pivotIn__.setNone()
self.obj71.graphClass_= graph_MT_pre__MatchModel
if self.genGraphics:
new_obj = graph_MT_pre__MatchModel(60.0,80.0,self.obj71)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__MatchModel", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj71.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj71)
self.globalAndLocalPostcondition(self.obj71, rootNode)
self.obj71.postAction( rootNode.CREATE )
self.obj72=MT_pre__MetaModelElement_S(self)
self.obj72.isGraphObjectVisual = True
if(hasattr(self.obj72, '_setHierarchicalLink')):
self.obj72._setHierarchicalLink(False)
# MT_pivotOut__
self.obj72.MT_pivotOut__.setValue('element1')
# MT_subtypeMatching__
self.obj72.MT_subtypeMatching__.setValue(('True', 1))
self.obj72.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj72.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj72.MT_pre__classtype.setHeight(15)
# MT_pivotIn__
self.obj72.MT_pivotIn__.setValue('element1')
# MT_label__
self.obj72.MT_label__.setValue('1')
# MT_pre__cardinality
self.obj72.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj72.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj72.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj72.MT_pre__name.setHeight(15)
self.obj72.graphClass_= graph_MT_pre__MetaModelElement_S
if self.genGraphics:
new_obj = graph_MT_pre__MetaModelElement_S(60.0,180.0,self.obj72)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__MetaModelElement_S", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj72.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj72)
self.globalAndLocalPostcondition(self.obj72, rootNode)
self.obj72.postAction( rootNode.CREATE )
self.obj73=MT_pre__MetaModelElement_S(self)
self.obj73.isGraphObjectVisual = True
if(hasattr(self.obj73, '_setHierarchicalLink')):
self.obj73._setHierarchicalLink(False)
# MT_pivotOut__
self.obj73.MT_pivotOut__.setValue('element2')
# MT_subtypeMatching__
self.obj73.MT_subtypeMatching__.setValue(('True', 1))
self.obj73.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj73.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj73.MT_pre__classtype.setHeight(15)
# MT_pivotIn__
self.obj73.MT_pivotIn__.setValue('element2')
# MT_label__
self.obj73.MT_label__.setValue('2')
# MT_pre__cardinality
self.obj73.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj73.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj73.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj73.MT_pre__name.setHeight(15)
self.obj73.graphClass_= graph_MT_pre__MetaModelElement_S
if self.genGraphics:
new_obj = graph_MT_pre__MetaModelElement_S(280.0,180.0,self.obj73)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__MetaModelElement_S", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj73.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj73)
self.globalAndLocalPostcondition(self.obj73, rootNode)
self.obj73.postAction( rootNode.CREATE )
self.obj74=MT_pre__match_contains(self)
self.obj74.isGraphObjectVisual = True
if(hasattr(self.obj74, '_setHierarchicalLink')):
self.obj74._setHierarchicalLink(False)
# MT_label__
self.obj74.MT_label__.setValue('5')
# MT_pivotOut__
self.obj74.MT_pivotOut__.setValue('')
self.obj74.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj74.MT_subtypeMatching__.setValue(('True', 0))
self.obj74.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj74.MT_pivotIn__.setValue('')
self.obj74.MT_pivotIn__.setNone()
self.obj74.graphClass_= graph_MT_pre__match_contains
if self.genGraphics:
new_obj = graph_MT_pre__match_contains(227.0,263.5,self.obj74)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__match_contains", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj74.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj74)
self.globalAndLocalPostcondition(self.obj74, rootNode)
self.obj74.postAction( rootNode.CREATE )
self.obj80=LHS(self)
self.obj80.isGraphObjectVisual = True
if(hasattr(self.obj80, '_setHierarchicalLink')):
self.obj80._setHierarchicalLink(False)
# constraint
self.obj80.constraint.setValue('if PreNode(\'1\')[\'classtype\'] == PreNode(\'2\')[\'classtype\']:\n if len([i for i in graph.neighbors(PreNode(\'2\').index) if graph.vs[i][\'mm__\'] == \'match_contains\']) == 0:\n return True\n\nreturn False\n')
self.obj80.constraint.setHeight(15)
self.obj80.graphClass_= graph_LHS
if self.genGraphics:
new_obj = graph_LHS(20.0,20.0,self.obj80)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("LHS", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj80.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj80)
self.globalAndLocalPostcondition(self.obj80, rootNode)
self.obj80.postAction( rootNode.CREATE )
# Connections for obj71 (graphObject_: Obj0) of type MT_pre__MatchModel
self.drawConnections(
(self.obj71,self.obj74,[219.0, 153.0, 227.0, 263.5],"true", 2) )
# Connections for obj72 (graphObject_: Obj1) of type MT_pre__MetaModelElement_S
self.drawConnections(
)
# Connections for obj73 (graphObject_: Obj2) of type MT_pre__MetaModelElement_S
self.drawConnections(
)
# Connections for obj74 (graphObject_: Obj3) of type MT_pre__match_contains
self.drawConnections(
(self.obj74,self.obj72,[227.0, 263.5, 230.0, 254.0],"true", 2) )
# Connections for obj80 (graphObject_: Obj9) of type LHS
self.drawConnections(
)
newfunction = find_two_match_elements_same_type_diff_rules_MDL
loadedMMName = ['MT_pre__GM2AUTOSAR_MM_META', 'MoTifRule_META']
atom3version = '0.3'
| {
"content_hash": "f7033fd3fc5e68c55ebfdc6414d706df",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 625,
"avg_line_length": 46.763636363636365,
"alnum_prop": 0.6412130637636081,
"repo_name": "levilucio/SyVOLT",
"id": "1fd8a4fe88e7d49111e4dc448eab5324d43798f0",
"size": "12860",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/merge_preprocess_rules/models/find_two_match_elements_same_type_diff_rules_MDL.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
"""Classes to read from and write to SQLite databases."""
import re
import sqlite3
class Sqlite3DatabaseFile(object):
"""SQLite3 database file."""
_HAS_TABLE_QUERY = (
'SELECT name FROM sqlite_master '
'WHERE type = "table" AND name = "{0:s}"')
def __init__(self):
"""Initializes a database file."""
super(Sqlite3DatabaseFile, self).__init__()
self._connection = None
self._cursor = None
self.filename = None
self.read_only = None
def Close(self):
"""Closes the database file.
Raises:
RuntimeError: if the database is not opened.
"""
if not self._connection:
raise RuntimeError('Cannot close database not opened.')
# We need to run commit or not all data is stored in the database.
self._connection.commit()
self._connection.close()
self._connection = None
self._cursor = None
self.filename = None
self.read_only = None
def CreateTable(self, table_name, column_definitions):
"""Creates a table.
Args:
table_name (str): table name.
column_definitions (list[str]): column definitions.
Raises:
RuntimeError: if the database is not opened or
if the database is in read-only mode.
"""
if not self._connection:
raise RuntimeError('Cannot create table database not opened.')
if self.read_only:
raise RuntimeError('Cannot create table database in read-only mode.')
column_definitions = ', '.join(column_definitions)
self._cursor.execute(
f'CREATE TABLE {table_name:s} ( {column_definitions:s} )')
def GetValues(self, table_names, column_names, condition):
"""Retrieves values from a table.
Args:
table_names (list[str]): table names.
column_names (list[str]): column names.
condition (str): condition.
Yields:
sqlite3.row: a row.
Raises:
RuntimeError: if the database is not opened.
"""
if not self._connection:
raise RuntimeError('Cannot retrieve values database not opened.')
table_names = ', '.join(table_names)
column_names_string = ', '.join(column_names)
sql_query = f'SELECT {column_names_string:s} FROM {table_names:s}'
if condition:
sql_query = ''.join([sql_query, f' WHERE {condition:s}'])
self._cursor.execute(sql_query)
for row in self._cursor:
values = {}
for column_index, column_name in enumerate(column_names):
values[column_name] = row[column_index]
yield values
def HasTable(self, table_name):
"""Determines if a specific table exists.
Args:
table_name (str): table name.
Returns:
      bool: True if the table exists, False otherwise.
Raises:
RuntimeError: if the database is not opened.
"""
if not self._connection:
raise RuntimeError(
'Cannot determine if table exists database not opened.')
sql_query = self._HAS_TABLE_QUERY.format(table_name)
self._cursor.execute(sql_query)
return bool(self._cursor.fetchone())
def InsertValues(self, table_name, column_names, values):
"""Inserts values into a table.
Args:
table_name (str): table name.
column_names (list[str]): column names.
values (list[str]): values formatted as a string.
Raises:
RuntimeError: if the database is not opened or
if the database is in read-only mode or
if an unsupported value type is encountered.
"""
if not self._connection:
raise RuntimeError('Cannot insert values database not opened.')
if self.read_only:
raise RuntimeError('Cannot insert values database in read-only mode.')
if not values:
return
sql_values = []
for value in values:
if isinstance(value, str):
# In sqlite3 the double quote is escaped with a second double quote.
value = re.sub('"', '""', value)
value = f'"{value:s}"'
elif isinstance(value, int):
value = f'{value:d}'
elif isinstance(value, float):
value = f'{value:f}'
elif value is None:
value = 'NULL'
else:
value_type = type(value)
raise RuntimeError(f'Unsupported value type: {value_type!s}.')
sql_values.append(value)
column_names = ', '.join(column_names)
sql_values = ', '.join(sql_values)
self._cursor.execute(
f'INSERT INTO {table_name:s} ( {column_names:s} ) '
f'VALUES ( {sql_values:s} )')
def Open(self, filename, read_only=False):
"""Opens the database file.
Args:
filename (str): filename of the database.
read_only (Optional[bool]): True if the database should be opened in
read-only mode. Since sqlite3 does not support a real read-only
mode we fake it by only permitting SELECT queries.
Returns:
bool: True if successful or False if not.
Raises:
RuntimeError: if the database is already opened.
"""
if self._connection:
raise RuntimeError('Cannot open database already opened.')
self.filename = filename
self.read_only = read_only
self._connection = sqlite3.connect(filename)
if not self._connection:
return False
self._cursor = self._connection.cursor()
if not self._cursor:
return False
return True
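# Illustrative usage sketch, not part of the original module: how
# Sqlite3DatabaseFile is typically driven end to end. The filename and table
# layout below are hypothetical examples, not values used by this project.
def _example_database_file_usage(filename='example.db'):
  """Creates a table, inserts a row and reads it back (illustration only)."""
  database_file = Sqlite3DatabaseFile()
  database_file.Open(filename)
  if not database_file.HasTable('examples'):
    database_file.CreateTable('examples', [
        'example_key INTEGER PRIMARY KEY AUTOINCREMENT', 'name TEXT',
        'size INTEGER'])
  database_file.InsertValues('examples', ['name', 'size'], ['sample', 10])
  rows = list(database_file.GetValues(['examples'], ['name', 'size'], ''))
  database_file.Close()
  return rows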
class Sqlite3DatabaseReader(object):
"""SQLite3 database reader."""
def __init__(self):
"""Initializes a database reader."""
super(Sqlite3DatabaseReader, self).__init__()
self._database_file = Sqlite3DatabaseFile()
def Close(self):
"""Closes the database reader object."""
self._database_file.Close()
def Open(self, filename):
"""Opens the database reader object.
Args:
filename (str): filename of the database.
Returns:
bool: True if successful or False if not.
"""
return self._database_file.Open(filename, read_only=True)
class Sqlite3DatabaseWriter(object):
"""SQLite3 database writer."""
def __init__(self):
"""Initializes a database writer."""
super(Sqlite3DatabaseWriter, self).__init__()
self._database_file = Sqlite3DatabaseFile()
def Close(self):
"""Closes the database writer object."""
self._database_file.Close()
def Open(self, filename):
"""Opens the database writer object.
Args:
filename (str): filename of the database.
Returns:
bool: True if successful or False if not.
"""
self._database_file.Open(filename)
return True
class EseDbCatalogSqlite3DatabaseWriter(Sqlite3DatabaseWriter):
"""ESE database catolog SQLite3 writer."""
def _GetDatabaseDefinitionKey(self, ese_database_definition):
"""Retrieves the key of a database definition.
Args:
ese_database_definition (EseDatabaseDefinition): database definition.
Returns:
int: database definition key or None if no such value.
Raises:
RuntimeError: if more than one value is found in the database.
"""
table_names = ['database_definitions']
column_names = ['database_definition_key']
condition = (
f'type = "{ese_database_definition.type:s}" AND '
f'version = "{ese_database_definition.version:s}"')
values_list = list(self._database_file.GetValues(
table_names, column_names, condition))
number_of_values = len(values_list)
if number_of_values == 0:
return None
if number_of_values == 1:
values = values_list[0]
return values['database_definition_key']
raise RuntimeError('More than one value found in database.')
def GetTableDefinitionKey(self, ese_table_definition):
"""Retrieves the key of a database definition.
Args:
ese_table_definition (EseTableDefinition): database definition.
Returns:
int: database definition key or None if no such value.
Raises:
RuntimeError: if more than one value is found in the database.
"""
table_names = ['table_definitions']
column_names = ['table_definition_key']
condition = f'name = "{ese_table_definition.name:s}"'
values_list = list(self._database_file.GetValues(
table_names, column_names, condition))
number_of_values = len(values_list)
if number_of_values == 0:
return None
if number_of_values == 1:
values = values_list[0]
return values['table_definition_key']
raise RuntimeError('More than one value found in database.')
def WriteColumnDefinition(self, table_definition_key, ese_column_definition):
"""Writes the column definition.
Args:
table_definition_key (int): table definition key.
ese_column_definition (EseColumnDefinition): column definition.
"""
table_name = 'column_definitions'
column_names = ['identifier', 'name', 'type', 'table_definition_key']
has_table = self._database_file.HasTable(table_name)
if not has_table:
column_definitions = [
'column_definition_key INTEGER PRIMARY KEY AUTOINCREMENT',
'identifier TEXT', 'name TEXT', 'type TEXT',
'table_definition_key INTEGER']
self._database_file.CreateTable(table_name, column_definitions)
insert_values = True
else:
condition = (
f'name = "{ese_column_definition.name:s}" AND '
f'table_definition_key = {table_definition_key:d}')
values_list = list(self._database_file.GetValues(
[table_name], column_names, condition))
number_of_values = len(values_list)
# TODO: check if more than 1 result.
insert_values = number_of_values == 0
if insert_values:
values = [
ese_column_definition.identifier, ese_column_definition.name,
ese_column_definition.type, table_definition_key]
self._database_file.InsertValues(table_name, column_names, values)
def WriteDatabaseDefinition(self, ese_database_definition):
"""Writes the database definition.
Args:
ese_database_definition (EseDatabaseDefinition): database definition.
"""
table_name = 'database_definitions'
column_names = ['type', 'version']
has_table = self._database_file.HasTable(table_name)
if not has_table:
column_definitions = [
'database_definition_key INTEGER PRIMARY KEY AUTOINCREMENT',
'type TEXT', 'version TEXT']
self._database_file.CreateTable(table_name, column_definitions)
insert_values = True
else:
condition = (
f'type = "{ese_database_definition.type:s}" AND '
f'version = "{ese_database_definition.version:s}"')
values_list = list(self._database_file.GetValues(
[table_name], column_names, condition))
number_of_values = len(values_list)
# TODO: check if more than 1 result.
insert_values = number_of_values == 0
if insert_values:
values = [ese_database_definition.type, ese_database_definition.version]
self._database_file.InsertValues(table_name, column_names, values)
def WriteTableDefinition(self, ese_table_definition):
"""Writes the table definition.
Args:
ese_table_definition (EseTableDefinition): table definition.
"""
table_name = 'table_definitions'
column_names = ['name']
has_table = self._database_file.HasTable(table_name)
if not has_table:
column_definitions = [
'table_definition_key INTEGER PRIMARY KEY AUTOINCREMENT',
'name TEXT']
self._database_file.CreateTable(table_name, column_definitions)
insert_values = True
else:
condition = f'name = "{ese_table_definition.name:s}"'
values_list = list(self._database_file.GetValues(
[table_name], column_names, condition))
number_of_values = len(values_list)
# TODO: check if more than 1 result.
insert_values = number_of_values == 0
if insert_values:
values = [ese_table_definition.name]
self._database_file.InsertValues(table_name, column_names, values)
| {
"content_hash": "109522765e00f5cc7faada0f9044d363",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 79,
"avg_line_length": 29.703241895261847,
"alnum_prop": 0.6490638905213668,
"repo_name": "libyal/esedb-kb",
"id": "66660bb24d9a24e6cdd6c1b7bccab68d5e85c125",
"size": "11935",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "esedbrc/database.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "1075"
},
{
"name": "Python",
"bytes": "65360"
},
{
"name": "Shell",
"bytes": "1184"
}
],
"symlink_target": ""
} |
def sanitize(time_string):
if '-' in time_string:
splitter = '-'
elif ':' in time_string:
splitter = ':'
else:
return(time_string)
(mins, secs) = time_string.split(splitter)
return(mins + '.' + secs)
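# Quick illustrative check, not part of the original example: sanitize()
# normalizes the three separator styles found in the coach files.
assert sanitize('2-34') == '2.34'
assert sanitize('2:34') == '2.34'
assert sanitize('2.22') == '2.22'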
def get_coach_data(filename):
try:
with open(filename) as f:
data = f.readline()
templ = data.strip().split(',')
return({'Name' : templ.pop(0),
'DOB' : templ.pop(0),
'Times': str(sorted(set([sanitize(t) for t in templ]))[0:3])})
except IOError as ioerr:
print('File error: ' + str(ioerr))
return(None)
james = get_coach_data('james2.txt')
julie = get_coach_data('julie2.txt')
mikey = get_coach_data('mikey2.txt')
sarah = get_coach_data('sarah2.txt')
print(james['Name'] + "'s fastest times are: " + james['Times'])
print(julie['Name'] + "'s fastest times are: " + julie['Times'])
print(mikey['Name'] + "'s fastest times are: " + mikey['Times'])
print(sarah['Name'] + "'s fastest times are: " + sarah['Times'])
| {
"content_hash": "218edaa19c926011f0e0411b9b830f92",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 78,
"avg_line_length": 34,
"alnum_prop": 0.5645161290322581,
"repo_name": "tdean1995/HFPythonSandbox",
"id": "1be10a4320dc70ad84951c1dee5f735278517b77",
"size": "1055",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "hfpython_code/hfpy_code/chapter6/page187.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5893"
},
{
"name": "Python",
"bytes": "103816"
}
],
"symlink_target": ""
} |
import json
import uuid
from oslo_log import log as logging
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from mistral.api.controllers import resource
from mistral.api.controllers.v2 import types
from mistral.db.v2 import api as db_api
from mistral import exceptions as exceptions
from mistral.utils import rest_utils
LOG = logging.getLogger(__name__)
SAMPLE = {
'server': 'localhost',
'database': 'temp',
'timeout': 600,
'verbose': True
}
class Environment(resource.Resource):
"""Environment resource."""
id = wtypes.text
name = wtypes.text
description = wtypes.text
variables = types.jsontype
scope = wtypes.Enum(str, 'private', 'public')
created_at = wtypes.text
updated_at = wtypes.text
@classmethod
def sample(cls):
return cls(id=str(uuid.uuid4()),
name='sample',
description='example environment entry',
variables=SAMPLE,
scope='private',
created_at='1970-01-01T00:00:00.000000',
updated_at='1970-01-01T00:00:00.000000')
class Environments(resource.Resource):
"""A collection of Environment resources."""
environments = [Environment]
@classmethod
def sample(cls):
return cls(environments=[Environment.sample()])
class EnvironmentController(rest.RestController):
@wsme_pecan.wsexpose(Environments)
def get_all(self):
"""Return all environments.
        Environments are returned if their project_id is the same as the
        requestor's, or if the project_id differs but the scope is public.
"""
LOG.info("Fetch environments.")
environments = [
Environment.from_dict(db_model.to_dict())
for db_model in db_api.get_environments()
]
return Environments(environments=environments)
@rest_utils.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(Environment, wtypes.text)
def get(self, name):
"""Return the named environment."""
LOG.info("Fetch environment [name=%s]" % name)
db_model = db_api.get_environment(name)
return Environment.from_dict(db_model.to_dict())
@rest_utils.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(Environment, body=Environment, status_code=201)
def post(self, env):
"""Create a new environment."""
LOG.info("Create environment [env=%s]" % env)
self._validate_environment(
json.loads(wsme_pecan.pecan.request.body),
['name', 'description', 'variables']
)
db_model = db_api.create_environment(env.to_dict())
return Environment.from_dict(db_model.to_dict())
@rest_utils.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(Environment, body=Environment)
def put(self, env):
"""Update an environment."""
if not env.name:
raise exceptions.InputException(
'Name of the environment is not provided.'
)
LOG.info("Update environment [name=%s, env=%s]" % (env.name, env))
definition = json.loads(wsme_pecan.pecan.request.body)
definition.pop('name')
self._validate_environment(
definition,
['description', 'variables', 'scope']
)
db_model = db_api.update_environment(env.name, env.to_dict())
return Environment.from_dict(db_model.to_dict())
@rest_utils.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(None, wtypes.text, status_code=204)
def delete(self, name):
"""Delete the named environment."""
LOG.info("Delete environment [name=%s]" % name)
db_api.delete_environment(name)
@staticmethod
def _validate_environment(env_dict, legal_keys):
if env_dict is None:
return
if set(env_dict) - set(legal_keys):
raise exceptions.InputException(
"Please, check your environment definition. Only: "
"%s are allowed as definition keys." % legal_keys
)
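# Illustrative sketch, not part of the Mistral source: _validate_environment
# only rejects unexpected keys, so a partial payload passes while a payload
# with an unknown field raises InputException. The dictionaries below are
# hypothetical examples.
def _validate_environment_example():
    EnvironmentController._validate_environment(
        {'name': 'sample'}, ['name', 'description', 'variables'])
    try:
        EnvironmentController._validate_environment(
            {'name': 'sample', 'unknown': 1},
            ['name', 'description', 'variables'])
    except exceptions.InputException:
        return 'unknown key rejected'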
| {
"content_hash": "0c6f631f045e4e2caad639b787b13e01",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 74,
"avg_line_length": 29.489208633093526,
"alnum_prop": 0.624786533300805,
"repo_name": "dennybaa/mistral",
"id": "87fdcce5a9bbe0ca3093f9c1bb881b33233ab8e3",
"size": "4709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mistral/api/controllers/v2/environment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "951"
},
{
"name": "Python",
"bytes": "1037769"
},
{
"name": "Shell",
"bytes": "18657"
}
],
"symlink_target": ""
} |
"""Functions for computing rich-club coefficients."""
from __future__ import division
import networkx as nx
from networkx.utils import accumulate
from networkx.utils import not_implemented_for
__all__ = ['rich_club_coefficient']
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def rich_club_coefficient(G, normalized=True, Q=100):
r"""Returns the rich-club coefficient of the graph `G`.
For each degree *k*, the *rich-club coefficient* is the ratio of the
number of actual to the number of potential edges for nodes with
degree greater than *k*:
.. math::
\phi(k) = \frac{2 E_k}{N_k (N_k - 1)}
where `N_k` is the number of nodes with degree larger than *k*, and
`E_k` is the number of edges among those nodes.
Parameters
----------
G : NetworkX graph
Undirected graph with neither parallel edges nor self-loops.
normalized : bool (optional)
Normalize using randomized network as in [1]_
Q : float (optional, default=100)
If `normalized` is True, perform `Q * m` double-edge
swaps, where `m` is the number of edges in `G`, to use as a
null-model for normalization.
Returns
-------
rc : dictionary
A dictionary, keyed by degree, with rich-club coefficient values.
Examples
--------
>>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
>>> rc = nx.rich_club_coefficient(G, normalized=False)
>>> rc[0] # doctest: +SKIP
0.4
Notes
-----
The rich club definition and algorithm are found in [1]_. This
algorithm ignores any edge weights and is not defined for directed
graphs or graphs with parallel edges or self loops.
Estimates for appropriate values of `Q` are found in [2]_.
References
----------
.. [1] Julian J. McAuley, Luciano da Fontoura Costa,
and Tibério S. Caetano,
"The rich-club phenomenon across complex network hierarchies",
Applied Physics Letters Vol 91 Issue 8, August 2007.
http://arxiv.org/abs/physics/0701290
.. [2] R. Milo, N. Kashtan, S. Itzkovitz, M. E. J. Newman, U. Alon,
"Uniform generation of random graphs with arbitrary degree
sequences", 2006. http://arxiv.org/abs/cond-mat/0312028
"""
if G.number_of_selfloops() > 0:
raise Exception('rich_club_coefficient is not implemented for '
'graphs with self loops.')
rc = _compute_rc(G)
if normalized:
# make R a copy of G, randomize with Q*|E| double edge swaps
# and use rich_club coefficient of R to normalize
R = G.copy(with_data=False)
E = R.number_of_edges()
nx.double_edge_swap(R, Q * E, max_tries=Q * E * 10)
rcran = _compute_rc(R)
rc = {k: v / rcran[k] for k, v in rc.items()}
return rc
def _compute_rc(G):
"""Returns the rich-club coefficient for each degree in the graph
`G`.
`G` is an undirected graph without multiedges.
Returns a dictionary mapping degree to rich-club coefficient for
that degree.
"""
deghist = nx.degree_histogram(G)
total = sum(deghist)
# Compute the number of nodes with degree greater than `k`, for each
# degree `k` (omitting the last entry, which is zero).
nks = (total - cs for cs in accumulate(deghist) if total - cs > 1)
# Create a sorted list of pairs of edge endpoint degrees.
#
# The list is sorted in reverse order so that we can pop from the
# right side of the list later, instead of popping from the left
# side of the list, which would have a linear time cost.
edge_degrees = sorted((sorted(map(G.degree, e)) for e in G.edges()),
reverse=True)
ek = G.number_of_edges()
k1, k2 = edge_degrees.pop()
rc = {}
for d, nk in enumerate(nks):
while k1 <= d:
if len(edge_degrees) == 0:
ek = 0
break
k1, k2 = edge_degrees.pop()
ek -= 1
rc[d] = 2 * ek / (nk * (nk - 1))
return rc
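# Illustrative sketch, not part of the NetworkX source: the docstring example
# above, spelled out without normalization so the result is deterministic.
def _rich_club_demo():
    G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
    rc = rich_club_coefficient(G, normalized=False)
    # Six nodes have degree > 0 and six edges connect them, so rc[0] == 0.4.
    return rc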
| {
"content_hash": "2ea63c90fc8c4d55031b7dd9709b6682",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 72,
"avg_line_length": 34.73504273504273,
"alnum_prop": 0.6124507874015748,
"repo_name": "cmtm/networkx",
"id": "a3bcaf0d7a60fb7c33f030e97126310a75b250a3",
"size": "4365",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "networkx/algorithms/richclub.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "PowerShell",
"bytes": "3311"
},
{
"name": "Python",
"bytes": "3230773"
}
],
"symlink_target": ""
} |
import unittest
import AutoCorrect
class testLearn(unittest.TestCase):
def setUp(self):
self.Dictionary = AutoCorrect.Dictionary()
def tearDown(self):
self.Dictionary = None
# .learn_word
def test_learn_one_word(self):
"""Test to see if it learns one word"""
self.Dictionary.learn_word('foobar')
expected = [('foobar', 1, [], [], [])]
actual = self.Dictionary.get_dictionary()
self.assertEqual(expected, actual)
def test_learn_one_word_twice(self):
"""Test to see if it learns one word twice"""
self.Dictionary.learn_word('foobar')
self.Dictionary.learn_word('foobar')
expected = [('foobar', 2, [], [], [])]
actual = self.Dictionary.get_dictionary()
self.assertEqual(expected, actual)
def test_learn_two_words(self):
"""Test to see if it learns two different word"""
self.Dictionary.learn_word('foo')
self.Dictionary.learn_word('bar')
expected = [('bar', 1, [], [], []), ('foo', 1, [], [], [])]
actual = self.Dictionary.get_dictionary()
self.assertIn(('bar', 1, [], [], []), actual)
self.assertIn(('foo', 1, [], [], []), actual)
#self.assertEqual(expected, actual)
# .learn_text
def test_learn_text(self):
"""Test to see if it learns a small piece of text"""
self.Dictionary.learn_text('hello world')
expected = [('hello', 1, [], [('world', 1)], []), ('world', 1, [('hello', 1)], [], [])]
actual = self.Dictionary.get_dictionary()
self.assertIn(('hello', 1, [], [('world', 1)], []), actual)
self.assertIn(('world', 1, [('hello', 1)], [], []), actual)
#self.assertEqual(expected, actual)
# .suggestion_feedback
def test_suggestion_feedback(self):
"""Test to see if it learns from suggestions feedback"""
self.Dictionary.learn_text('These are not the droids you are looking for')
self.Dictionary.suggestion_feedback('nor', 'not')
expected = [('not', 3), ('for', 0)]
actual = self.Dictionary.find_similar_words('nor')
self.assertEqual(expected, actual)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "fbd0427e71832febb976aee183cf3ad5",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 89,
"avg_line_length": 29.70149253731343,
"alnum_prop": 0.6412060301507537,
"repo_name": "ericcornelissen/AutoCorrect-py",
"id": "848d07f934a88ab3c0e4a2b91f3dfa4273ff0496",
"size": "1990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testLearning.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24839"
}
],
"symlink_target": ""
} |
"""Creates a pair of recurrant models for the Stack Overflow next word prediction task.
Modified version of
tff.simulation.baselines.stackoverflow.create_word_prediction_task and dependent
functions which allows for different-sized recurrent models.
"""
import functools
import tensorflow as tf
import tensorflow_federated as tff
class TransposableEmbedding(tf.keras.layers.Layer):
"""A Keras layer implementing a transposed projection output layer."""
def __init__(self, embedding_layer: tf.keras.layers.Embedding):
super().__init__()
self.embeddings = embedding_layer.embeddings
# Placing `tf.matmul` under the `call` method is important for backpropagating
# the gradients of `self.embeddings` in graph mode.
def call(self, inputs):
return tf.matmul(inputs, self.embeddings, transpose_b=True)
def create_recurrent_model(vocab_size: int,
embedding_size: int = 96,
num_lstm_layers: int = 1,
lstm_size: int = 670,
shared_embedding: bool = False) -> tf.keras.Model:
"""Constructs a recurrent model with an initial embeding layer.
The resulting model embeds sequences of integer tokens (whose values vary
between `0` and `vocab_size-1`) into an `embedding_size`-dimensional space.
It then applies `num_lstm_layers` LSTM layers, each of size `lstm_size`.
Each LSTM is followed by a dense layer mapping the output to `embedding_size`
units. The model then has a final dense layer mapping to `vocab_size` logits
units. Note that this model does not compute any kind of softmax on the final
logits. This should instead be done in the loss function for the purposes of
backpropagation.
Args:
vocab_size: Vocabulary size to use in the initial embedding layer.
embedding_size: The size of the embedding layer.
num_lstm_layers: The number of LSTM layers in the model.
lstm_size: The size of each LSTM layer.
shared_embedding: If set to `True`, the final layer of the model is a dense
layer given by the transposition of the embedding layer. If `False`, the
final dense layer is instead learned separately.
Returns:
An uncompiled `tf.keras.Model`.
"""
if vocab_size < 1:
raise ValueError('vocab_size must be a positive integer.')
if embedding_size < 1:
raise ValueError('embedding_size must be a positive integer.')
if num_lstm_layers < 1:
raise ValueError('num_lstm_layers must be a positive integer.')
if lstm_size < 1:
raise ValueError('lstm_size must be a positive integer.')
inputs = tf.keras.layers.Input(shape=(None,))
input_embedding = tf.keras.layers.Embedding(
input_dim=vocab_size, output_dim=embedding_size, mask_zero=True)
embedded = input_embedding(inputs)
projected = embedded
for _ in range(num_lstm_layers):
layer = tf.keras.layers.LSTM(lstm_size, return_sequences=True)
processed = layer(projected)
projected = tf.keras.layers.Dense(embedding_size)(processed)
if shared_embedding:
transposed_embedding = TransposableEmbedding(input_embedding)
logits = transposed_embedding(projected)
else:
logits = tf.keras.layers.Dense(vocab_size, activation=None)(projected)
return tf.keras.Model(inputs=inputs, outputs=logits)
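# Illustrative sketch, not part of the original module: building a small
# instance of the recurrent model above and checking its output shape. The
# sizes used here are arbitrary examples.
def _recurrent_model_smoke_test():
  model = create_recurrent_model(vocab_size=100, embedding_size=8, lstm_size=16)
  tokens = tf.ones([2, 5], dtype=tf.int32)  # a batch of 2 sequences, length 5
  logits = model(tokens)
  assert logits.shape == (2, 5, 100)  # one logit per vocabulary entry per token
  return model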
def make_big_and_small_stackoverflow_model_fn(my_task,
vocab_size=10000,
num_out_of_vocab_buckets=1,
big_embedding_size=96,
big_lstm_size=670,
small_embedding_size=72,
small_lstm_size=503):
"""Generates two model functions for a given task.
This code is a modified version of
tff.simulation.baselines.stackoverflow.create_word_prediction_task
Args:
my_task: a tff.simulation.baselines.BaselineTask object
vocab_size: an integer specifying the vocab size
num_out_of_vocab_buckets: an integer specifying the number of out of vocab
buckets
big_embedding_size: an integer specifying the size of the embedding layer of
the big model
big_lstm_size: an integer specifying the size of the lstm layer of the big
model
small_embedding_size: an integer specifying the size of the embedding layer
of the small model
small_lstm_size: an integer specifying the size of the lstm layer of the
small model
Returns:
Two model_fn functions
"""
extended_vocab_size = vocab_size + 3 + num_out_of_vocab_buckets
def big_stackoverflownwp_rnn_model_fn():
return tff.learning.from_keras_model(
keras_model=create_recurrent_model(
vocab_size=extended_vocab_size,
embedding_size=big_embedding_size,
lstm_size=big_lstm_size),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
input_spec=my_task.datasets.element_type_structure,
)
  # the standard size corresponding to the stackoverflow baseline task
# has embedding_size=96, lstm_size=670
def small_stackoverflownwp_rnn_model_fn():
return tff.learning.from_keras_model(
keras_model=create_recurrent_model(
vocab_size=extended_vocab_size,
embedding_size=small_embedding_size,
lstm_size=small_lstm_size),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
input_spec=my_task.datasets.element_type_structure,
)
return big_stackoverflownwp_rnn_model_fn, small_stackoverflownwp_rnn_model_fn
def create_conv_dropout_model(conv1_filters=32,
conv2_filters=64,
dense_size=128,
only_digits: bool = True) -> tf.keras.Model:
"""Create a convolutional network with dropout.
When `only_digits=True`, the summary of returned model is
```
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
reshape (Reshape) (None, 28, 28, 1) 0
_________________________________________________________________
conv2d (Conv2D) (None, 26, 26, 32) 320
_________________________________________________________________
conv2d_1 (Conv2D) (None, 24, 24, 64) 18496
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 12, 12, 64) 0
_________________________________________________________________
dropout (Dropout) (None, 12, 12, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 9216) 0
_________________________________________________________________
dense (Dense) (None, 128) 1179776
_________________________________________________________________
dropout_1 (Dropout) (None, 128) 0
_________________________________________________________________
dense_1 (Dense) (None, 10) 1290
=================================================================
Total params: 1,199,882
Trainable params: 1,199,882
Non-trainable params: 0
```
For `only_digits=False`, the last dense layer is slightly larger.
Args:
conv1_filters: The number of convolutional filters in the 1st convolutional
layer
conv2_filters: The number of convolutional filters in the 2nd convolutional
layer
dense_size: The number of neurons in the last dense layer
only_digits: If `True`, uses a final layer with 10 outputs, for use with the
digits only EMNIST dataset. If `False`, uses 62 outputs for the larger
dataset.
Returns:
An uncompiled `tf.keras.Model`.
"""
data_format = 'channels_last'
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(
conv1_filters,
kernel_size=(3, 3),
activation='relu',
data_format=data_format,
input_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(
conv2_filters,
kernel_size=(3, 3),
activation='relu',
data_format=data_format),
tf.keras.layers.MaxPool2D(pool_size=(2, 2), data_format=data_format),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(dense_size, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(
10 if only_digits else 62, activation=tf.nn.softmax),
])
return model
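# Illustrative sketch, not part of the original module: the 1,199,882 parameter
# count quoted in the summary above can be reproduced from the model object.
def _conv_dropout_param_count():
  model = create_conv_dropout_model(only_digits=True)
  return model.count_params()  # 1,199,882 with the default layer sizes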
def create_conv_dropout_model_mfactor(
conv1_filters=32,
conv2_filters=64,
dense_size=128,
mfactor1=1.0,
mfactor2=1.0,
mfactor_dense=1.0,
only_digits: bool = True) -> tf.keras.Model:
"""Create a convolutional network with dropout.
When `only_digits=True`, the summary of returned model is
```
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
reshape (Reshape) (None, 28, 28, 1) 0
_________________________________________________________________
conv2d (Conv2D) (None, 26, 26, 32) 320
_________________________________________________________________
conv2d_1 (Conv2D) (None, 24, 24, 64) 18496
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 12, 12, 64) 0
_________________________________________________________________
dropout (Dropout) (None, 12, 12, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 9216) 0
_________________________________________________________________
dense (Dense) (None, 128) 1179776
_________________________________________________________________
dropout_1 (Dropout) (None, 128) 0
_________________________________________________________________
dense_1 (Dense) (None, 10) 1290
=================================================================
Total params: 1,199,882
Trainable params: 1,199,882
Non-trainable params: 0
```
For `only_digits=False`, the last dense layer is slightly larger.
Args:
conv1_filters: The number of convolutional filters in the 1st convolutional
layer
conv2_filters: The number of convolutional filters in the 2nd convolutional
layer
dense_size: The number of neurons in the last dense layer
mfactor1: The multiplicative scaling applied after the first convolutional
layer
mfactor2: The multiplicative scaling applied after the second convolutional
layer
mfactor_dense: The multiplicative scaling applied after the dense layer
only_digits: If `True`, uses a final layer with 10 outputs, for use with the
digits only EMNIST dataset. If `False`, uses 62 outputs for the larger
dataset.
Returns:
An uncompiled `tf.keras.Model`.
"""
data_format = 'channels_last'
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(
conv1_filters,
kernel_size=(3, 3),
activation='relu',
data_format=data_format,
input_shape=(28, 28, 1)),
tf.keras.layers.Lambda(lambda x: mfactor1 * x),
tf.keras.layers.Conv2D(
conv2_filters,
kernel_size=(3, 3),
activation='relu',
data_format=data_format),
tf.keras.layers.Lambda(lambda x: mfactor2 * x),
tf.keras.layers.MaxPool2D(pool_size=(2, 2), data_format=data_format),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(dense_size, activation='relu'),
tf.keras.layers.Lambda(lambda x: mfactor_dense * x),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(
10 if only_digits else 62, activation=tf.nn.softmax),
])
return model
def create_original_fedavg_cnn_model(
conv1_filters=32,
conv2_filters=64,
dense_size=512,
only_digits: bool = True) -> tf.keras.Model:
"""Create a convolutional network without dropout.
This recreates the CNN model used in the original FedAvg paper,
https://arxiv.org/abs/1602.05629. The number of parameters when
`only_digits=True` is (1,663,370), which matches what is reported in the
paper. When `only_digits=True`, the summary of returned model is
```
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
reshape (Reshape) (None, 28, 28, 1) 0
_________________________________________________________________
conv2d (Conv2D) (None, 28, 28, 32) 832
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 14, 14, 32) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 14, 14, 64) 51264
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 7, 7, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 3136) 0
_________________________________________________________________
dense (Dense) (None, 512) 1606144
_________________________________________________________________
dense_1 (Dense) (None, 10) 5130
=================================================================
Total params: 1,663,370
Trainable params: 1,663,370
Non-trainable params: 0
```
For `only_digits=False`, the last dense layer is slightly larger.
Args:
conv1_filters: The number of convolutional filters in the 1st convolutional
layer
conv2_filters: The number of convolutional filters in the 2nd convolutional
layer
dense_size: The number of neurons in the last dense layer
only_digits: If `True`, uses a final layer with 10 outputs, for use with the
digits only EMNIST dataset. If `False`, uses 62 outputs for the larger
dataset.
Returns:
An uncompiled `tf.keras.Model`.
"""
data_format = 'channels_last'
max_pool = functools.partial(
tf.keras.layers.MaxPooling2D,
pool_size=(2, 2),
padding='same',
data_format=data_format)
conv2d = functools.partial(
tf.keras.layers.Conv2D,
kernel_size=5,
padding='same',
data_format=data_format,
activation=tf.nn.relu)
model = tf.keras.models.Sequential([
conv2d(filters=conv1_filters, input_shape=(28, 28, 1)),
max_pool(),
conv2d(filters=conv2_filters),
max_pool(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(dense_size, activation=tf.nn.relu),
tf.keras.layers.Dense(
10 if only_digits else 62, activation=tf.nn.softmax),
])
return model
def make_big_and_small_emnist_cnn_model_fn(my_task,
big_conv1_filters=32,
big_conv2_filters=64,
big_dense_size=512,
small_conv1_filters=24,
small_conv2_filters=48,
small_dense_size=384):
"""Generates two model functions for a given task.
Args:
my_task: a tff.simulation.baselines.BaselineTask object
big_conv1_filters: The number of convolutional filters in the 1st
convolutional layer of the big model
big_conv2_filters: The number of convolutional filters in the 2nd
convolutional layer of the big model
big_dense_size: The number of neurons in the last dense layer of the big
model
small_conv1_filters: The number of convolutional filters in the 1st
convolutional layer of the small model
small_conv2_filters: The number of convolutional filters in the 2nd
convolutional layer of the small model
small_dense_size: The number of neurons in the last dense layer of the small
model
Returns:
Two model_fn functions
"""
def big_model_fn():
return tff.learning.from_keras_model(
keras_model=create_original_fedavg_cnn_model(
only_digits=False,
conv1_filters=big_conv1_filters,
conv2_filters=big_conv2_filters,
dense_size=big_dense_size),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
input_spec=my_task.datasets.element_type_structure,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
def small_model_fn():
return tff.learning.from_keras_model(
keras_model=create_original_fedavg_cnn_model(
only_digits=False,
conv1_filters=small_conv1_filters,
conv2_filters=small_conv2_filters,
dense_size=small_dense_size),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
input_spec=my_task.datasets.element_type_structure,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return big_model_fn, small_model_fn
def make_big_and_small_emnist_cnn_dropout_model_fn(my_task,
big_conv1_filters=32,
big_conv2_filters=64,
big_dense_size=128,
small_conv1_filters=24,
small_conv2_filters=48,
small_dense_size=96):
"""Generates two model functions for a given task.
Args:
my_task: a tff.simulation.baselines.BaselineTask object
big_conv1_filters: The number of convolutional filters in the 1st
convolutional layer of the big model
big_conv2_filters: The number of convolutional filters in the 2nd
convolutional layer of the big model
big_dense_size: The number of neurons in the last dense layer of the big
model
small_conv1_filters: The number of convolutional filters in the 1st
convolutional layer of the small model
small_conv2_filters: The number of convolutional filters in the 2nd
convolutional layer of the small model
small_dense_size: The number of neurons in the last dense layer of the small
model
Returns:
Two model_fn functions.
"""
def big_model_fn():
return tff.learning.from_keras_model(
keras_model=create_conv_dropout_model(
only_digits=False,
conv1_filters=big_conv1_filters,
conv2_filters=big_conv2_filters,
dense_size=big_dense_size),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
input_spec=my_task.datasets.element_type_structure,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
def small_model_fn():
return tff.learning.from_keras_model(
keras_model=create_conv_dropout_model(
only_digits=False,
conv1_filters=small_conv1_filters,
conv2_filters=small_conv2_filters,
dense_size=small_dense_size),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
input_spec=my_task.datasets.element_type_structure,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return big_model_fn, small_model_fn
def make_big_and_small_emnist_cnn_dropout_mfactor_model_fn(
my_task,
big_conv1_filters=32,
big_conv2_filters=64,
big_dense_size=128,
small_conv1_filters=24,
small_conv2_filters=48,
small_dense_size=96):
"""Generates two model functions for a given task.
Args:
my_task: a tff.simulation.baselines.BaselineTask object
big_conv1_filters: The number of convolutional filters in the 1st
convolutional layer of the big model
big_conv2_filters: The number of convolutional filters in the 2nd
convolutional layer of the big model
big_dense_size: The number of neurons in the last dense layer of the big
model
small_conv1_filters: The number of convolutional filters in the 1st
convolutional layer of the small model
small_conv2_filters: The number of convolutional filters in the 2nd
convolutional layer of the small model
small_dense_size: The number of neurons in the last dense layer of the small
model
Returns:
Two model_fn functions.
"""
def big_model_fn():
return tff.learning.from_keras_model(
keras_model=create_conv_dropout_model_mfactor(
only_digits=False,
conv1_filters=big_conv1_filters,
conv2_filters=big_conv2_filters,
dense_size=big_dense_size,
mfactor1=1.0,
mfactor2=1.0,
mfactor_dense=1.0),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
input_spec=my_task.datasets.element_type_structure,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
def small_model_fn():
return tff.learning.from_keras_model(
keras_model=create_conv_dropout_model_mfactor(
only_digits=False,
conv1_filters=small_conv1_filters,
conv2_filters=small_conv2_filters,
dense_size=small_dense_size,
mfactor1=tf.cast(
big_conv1_filters / small_conv1_filters, tf.float32
), # cast this as a float since these could be integers
mfactor2=tf.cast(big_conv2_filters / small_conv2_filters,
tf.float32),
mfactor_dense=tf.cast(big_dense_size / small_dense_size,
tf.float32)),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
input_spec=my_task.datasets.element_type_structure,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return big_model_fn, small_model_fn
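# Illustrative note, not part of the original module: the Lambda scaling
# factors passed to the small model above are simply the width ratios between
# the big and small layers; with the default sizes every ratio is 4/3.
def _default_emnist_mfactors(big=(32, 64, 128), small=(24, 48, 96)):
  return tuple(b / s for b, s in zip(big, small))  # (4/3, 4/3, 4/3) by default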
| {
"content_hash": "6b252d5b35171fd15dc7732fad7076c2",
"timestamp": "",
"source": "github",
"line_count": 541,
"max_line_length": 87,
"avg_line_length": 41.42883548983364,
"alnum_prop": 0.5704278766787132,
"repo_name": "google-research/federated",
"id": "4b7d620a8951005371a61d8dd93676704b59876f",
"size": "23012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shrink_unshrink/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "76424"
},
{
"name": "Python",
"bytes": "4122952"
},
{
"name": "Shell",
"bytes": "7089"
},
{
"name": "Starlark",
"bytes": "97189"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import hashlib
import importlib
import importlib.machinery
import importlib.util
import os
import sys
import textwrap
import traceback
import warnings
import zipfile
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, NamedTuple
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import Session
from tabulate import tabulate
from airflow import settings
from airflow.configuration import conf
from airflow.exceptions import (
AirflowClusterPolicyViolation,
AirflowDagCycleException,
AirflowDagDuplicatedIdException,
AirflowDagInconsistent,
AirflowTimetableInvalid,
ParamValidationError,
RemovedInAirflow3Warning,
)
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.dag_cycle_tester import check_cycle
from airflow.utils.docs import get_docs_url
from airflow.utils.file import correct_maybe_zipped, list_py_file_paths, might_contain_dag
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.retries import MAX_DB_RETRIES, run_with_db_retries
from airflow.utils.session import provide_session
from airflow.utils.timeout import timeout
from airflow.utils.types import NOTSET, ArgNotSet
if TYPE_CHECKING:
import pathlib
class FileLoadStat(NamedTuple):
"""Information about single file"""
file: str
duration: timedelta
dag_num: int
task_num: int
dags: str
class DagBag(LoggingMixin):
"""
    A dagbag is a collection of dags, parsed out of a folder tree, with high-level
    configuration settings, like what database to use as a backend and
what executor to use to fire off tasks. This makes it easier to run
distinct environments for say production and development, tests, or for
different teams or security profiles. What would have been system level
settings are now dagbag level so that one system can run multiple,
independent settings sets.
:param dag_folder: the folder to scan to find DAGs
:param include_examples: whether to include the examples that ship
with airflow or not
:param read_dags_from_db: Read DAGs from DB if ``True`` is passed.
If ``False`` DAGs are read from python files.
:param load_op_links: Should the extra operator link be loaded via plugins when
de-serializing the DAG? This flag is set to False in Scheduler so that Extra Operator links
are not loaded to not run User code in Scheduler.
"""
def __init__(
self,
dag_folder: str | pathlib.Path | None = None,
include_examples: bool | ArgNotSet = NOTSET,
safe_mode: bool | ArgNotSet = NOTSET,
read_dags_from_db: bool = False,
store_serialized_dags: bool | None = None,
load_op_links: bool = True,
collect_dags: bool = True,
):
# Avoid circular import
from airflow.models.dag import DAG
super().__init__()
include_examples = (
include_examples
if isinstance(include_examples, bool)
else conf.getboolean("core", "LOAD_EXAMPLES")
)
safe_mode = (
safe_mode if isinstance(safe_mode, bool) else conf.getboolean("core", "DAG_DISCOVERY_SAFE_MODE")
)
if store_serialized_dags:
warnings.warn(
"The store_serialized_dags parameter has been deprecated. "
"You should pass the read_dags_from_db parameter.",
RemovedInAirflow3Warning,
stacklevel=2,
)
read_dags_from_db = store_serialized_dags
dag_folder = dag_folder or settings.DAGS_FOLDER
self.dag_folder = dag_folder
self.dags: dict[str, DAG] = {}
# the file's last modified timestamp when we last read it
self.file_last_changed: dict[str, datetime] = {}
self.import_errors: dict[str, str] = {}
self.has_logged = False
self.read_dags_from_db = read_dags_from_db
# Only used by read_dags_from_db=True
self.dags_last_fetched: dict[str, datetime] = {}
# Only used by SchedulerJob to compare the dag_hash to identify change in DAGs
self.dags_hash: dict[str, str] = {}
self.dagbag_import_error_tracebacks = conf.getboolean("core", "dagbag_import_error_tracebacks")
self.dagbag_import_error_traceback_depth = conf.getint("core", "dagbag_import_error_traceback_depth")
if collect_dags:
self.collect_dags(
dag_folder=dag_folder,
include_examples=include_examples,
safe_mode=safe_mode,
)
# Should the extra operator link be loaded via plugins?
# This flag is set to False in Scheduler so that Extra Operator links are not loaded
self.load_op_links = load_op_links
def size(self) -> int:
""":return: the amount of dags contained in this dagbag"""
return len(self.dags)
@property
def store_serialized_dags(self) -> bool:
"""Whether or not to read dags from DB"""
warnings.warn(
"The store_serialized_dags property has been deprecated. Use read_dags_from_db instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
return self.read_dags_from_db
@property
def dag_ids(self) -> list[str]:
"""
Get DAG ids.
:return: a list of DAG IDs in this bag
"""
return list(self.dags.keys())
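    # Illustrative sketch, not part of the Airflow source: typical read-only
    # use of a DagBag over a local DAG folder (requires a configured Airflow
    # environment; the folder path is a hypothetical example):
    #
    #     bag = DagBag(dag_folder="/path/to/dags", include_examples=False)
    #     print(bag.size(), "dags:", bag.dag_ids)
    #     print("import errors:", bag.import_errors)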
@provide_session
def get_dag(self, dag_id, session: Session = None):
"""
Gets the DAG out of the dictionary, and refreshes it if expired
:param dag_id: DAG Id
"""
# Avoid circular import
from airflow.models.dag import DagModel
if self.read_dags_from_db:
# Import here so that serialized dag is only imported when serialization is enabled
from airflow.models.serialized_dag import SerializedDagModel
if dag_id not in self.dags:
# Load from DB if not (yet) in the bag
self._add_dag_from_db(dag_id=dag_id, session=session)
return self.dags.get(dag_id)
# If DAG is in the DagBag, check the following
# 1. if time has come to check if DAG is updated (controlled by min_serialized_dag_fetch_secs)
# 2. check the last_updated column in SerializedDag table to see if Serialized DAG is updated
# 3. if (2) is yes, fetch the Serialized DAG.
# 4. if (2) returns None (i.e. Serialized DAG is deleted), remove dag from dagbag
# if it exists and return None.
min_serialized_dag_fetch_secs = timedelta(seconds=settings.MIN_SERIALIZED_DAG_FETCH_INTERVAL)
if (
dag_id in self.dags_last_fetched
and timezone.utcnow() > self.dags_last_fetched[dag_id] + min_serialized_dag_fetch_secs
):
sd_last_updated_datetime = SerializedDagModel.get_last_updated_datetime(
dag_id=dag_id,
session=session,
)
if not sd_last_updated_datetime:
self.log.warning("Serialized DAG %s no longer exists", dag_id)
del self.dags[dag_id]
del self.dags_last_fetched[dag_id]
del self.dags_hash[dag_id]
return None
if sd_last_updated_datetime > self.dags_last_fetched[dag_id]:
self._add_dag_from_db(dag_id=dag_id, session=session)
return self.dags.get(dag_id)
# If asking for a known subdag, we want to refresh the parent
dag = None
root_dag_id = dag_id
if dag_id in self.dags:
dag = self.dags[dag_id]
if dag.parent_dag:
root_dag_id = dag.parent_dag.dag_id
# If DAG Model is absent, we can't check last_expired property. Is the DAG not yet synchronized?
orm_dag = DagModel.get_current(root_dag_id, session=session)
if not orm_dag:
return self.dags.get(dag_id)
# If the dag corresponding to root_dag_id is absent or expired
is_missing = root_dag_id not in self.dags
is_expired = orm_dag.last_expired and dag and dag.last_loaded < orm_dag.last_expired
if is_expired:
# Remove associated dags so we can re-add them.
self.dags = {
key: dag
for key, dag in self.dags.items()
if root_dag_id != key and not (dag.parent_dag and root_dag_id == dag.parent_dag.dag_id)
}
if is_missing or is_expired:
# Reprocess source file.
found_dags = self.process_file(
filepath=correct_maybe_zipped(orm_dag.fileloc), only_if_updated=False
)
# If the source file no longer exports `dag_id`, delete it from self.dags
if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]:
return self.dags[dag_id]
elif dag_id in self.dags:
del self.dags[dag_id]
return self.dags.get(dag_id)
def _add_dag_from_db(self, dag_id: str, session: Session):
"""Add DAG to DagBag from DB"""
from airflow.models.serialized_dag import SerializedDagModel
row = SerializedDagModel.get(dag_id, session)
if not row:
return None
row.load_op_links = self.load_op_links
dag = row.dag
for subdag in dag.subdags:
self.dags[subdag.dag_id] = subdag
self.dags[dag.dag_id] = dag
self.dags_last_fetched[dag.dag_id] = timezone.utcnow()
self.dags_hash[dag.dag_id] = row.dag_hash
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
"""
Given a path to a python module or zip file, this method imports
        the module and looks for DAG objects within it.
"""
from airflow.models.dag import DagContext
# if the source file no longer exists in the DB or in the filesystem,
# return an empty list
# todo: raise exception?
if filepath is None or not os.path.isfile(filepath):
return []
try:
# This failed before in what may have been a git sync
# race condition
file_last_changed_on_disk = datetime.fromtimestamp(os.path.getmtime(filepath))
if (
only_if_updated
and filepath in self.file_last_changed
and file_last_changed_on_disk == self.file_last_changed[filepath]
):
return []
except Exception as e:
self.log.exception(e)
return []
# Ensure we don't pick up anything else we didn't mean to
DagContext.autoregistered_dags.clear()
if filepath.endswith(".py") or not zipfile.is_zipfile(filepath):
mods = self._load_modules_from_file(filepath, safe_mode)
else:
mods = self._load_modules_from_zip(filepath, safe_mode)
found_dags = self._process_modules(filepath, mods, file_last_changed_on_disk)
self.file_last_changed[filepath] = file_last_changed_on_disk
return found_dags
def _load_modules_from_file(self, filepath, safe_mode):
from airflow.models.dag import DagContext
if not might_contain_dag(filepath, safe_mode):
# Don't want to spam user with skip messages
if not self.has_logged:
self.has_logged = True
self.log.info("File %s assumed to contain no DAGs. Skipping.", filepath)
return []
self.log.debug("Importing %s", filepath)
org_mod_name, _ = os.path.splitext(os.path.split(filepath)[-1])
path_hash = hashlib.sha1(filepath.encode("utf-8")).hexdigest()
mod_name = f"unusual_prefix_{path_hash}_{org_mod_name}"
if mod_name in sys.modules:
del sys.modules[mod_name]
DagContext.current_autoregister_module_name = mod_name
def parse(mod_name, filepath):
try:
loader = importlib.machinery.SourceFileLoader(mod_name, filepath)
spec = importlib.util.spec_from_loader(mod_name, loader)
new_module = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = new_module
loader.exec_module(new_module)
return [new_module]
except Exception as e:
DagContext.autoregistered_dags.clear()
self.log.exception("Failed to import: %s", filepath)
if self.dagbag_import_error_tracebacks:
self.import_errors[filepath] = traceback.format_exc(
limit=-self.dagbag_import_error_traceback_depth
)
else:
self.import_errors[filepath] = str(e)
return []
dagbag_import_timeout = settings.get_dagbag_import_timeout(filepath)
if not isinstance(dagbag_import_timeout, (int, float)):
raise TypeError(
f"Value ({dagbag_import_timeout}) from get_dagbag_import_timeout must be int or float"
)
if dagbag_import_timeout <= 0: # no parsing timeout
return parse(mod_name, filepath)
timeout_msg = (
f"DagBag import timeout for {filepath} after {dagbag_import_timeout}s.\n"
"Please take a look at these docs to improve your DAG import time:\n"
f"* {get_docs_url('best-practices.html#top-level-python-code')}\n"
f"* {get_docs_url('best-practices.html#reducing-dag-complexity')}"
)
with timeout(dagbag_import_timeout, error_message=timeout_msg):
return parse(mod_name, filepath)
def _load_modules_from_zip(self, filepath, safe_mode):
from airflow.models.dag import DagContext
mods = []
with zipfile.ZipFile(filepath) as current_zip_file:
for zip_info in current_zip_file.infolist():
head, _ = os.path.split(zip_info.filename)
mod_name, ext = os.path.splitext(zip_info.filename)
if ext not in [".py", ".pyc"]:
continue
if head:
continue
if mod_name == "__init__":
self.log.warning("Found __init__.%s at root of %s", ext, filepath)
self.log.debug("Reading %s from %s", zip_info.filename, filepath)
if not might_contain_dag(zip_info.filename, safe_mode, current_zip_file):
# todo: create ignore list
# Don't want to spam user with skip messages
if not self.has_logged:
self.has_logged = True
self.log.info(
"File %s:%s assumed to contain no DAGs. Skipping.", filepath, zip_info.filename
)
continue
if mod_name in sys.modules:
del sys.modules[mod_name]
DagContext.current_autoregister_module_name = mod_name
try:
sys.path.insert(0, filepath)
current_module = importlib.import_module(mod_name)
mods.append(current_module)
except Exception as e:
DagContext.autoregistered_dags.clear()
fileloc = os.path.join(filepath, zip_info.filename)
self.log.exception("Failed to import: %s", fileloc)
if self.dagbag_import_error_tracebacks:
self.import_errors[fileloc] = traceback.format_exc(
limit=-self.dagbag_import_error_traceback_depth
)
else:
self.import_errors[fileloc] = str(e)
finally:
if sys.path[0] == filepath:
del sys.path[0]
return mods
def _process_modules(self, filepath, mods, file_last_changed_on_disk):
from airflow.models.dag import DAG, DagContext # Avoid circular import
top_level_dags = {(o, m) for m in mods for o in m.__dict__.values() if isinstance(o, DAG)}
top_level_dags.update(DagContext.autoregistered_dags)
DagContext.current_autoregister_module_name = None
DagContext.autoregistered_dags.clear()
found_dags = []
for (dag, mod) in top_level_dags:
dag.fileloc = mod.__file__
try:
dag.validate()
self.bag_dag(dag=dag, root_dag=dag)
except AirflowTimetableInvalid as exception:
self.log.exception("Failed to bag_dag: %s", dag.fileloc)
self.import_errors[dag.fileloc] = f"Invalid timetable expression: {exception}"
self.file_last_changed[dag.fileloc] = file_last_changed_on_disk
except (
AirflowClusterPolicyViolation,
AirflowDagCycleException,
AirflowDagDuplicatedIdException,
AirflowDagInconsistent,
ParamValidationError,
) as exception:
self.log.exception("Failed to bag_dag: %s", dag.fileloc)
self.import_errors[dag.fileloc] = str(exception)
self.file_last_changed[dag.fileloc] = file_last_changed_on_disk
else:
found_dags.append(dag)
found_dags += dag.subdags
return found_dags
def bag_dag(self, dag, root_dag):
"""
Adds the DAG into the bag, recurses into sub dags.
:raises: AirflowDagCycleException if a cycle is detected in this dag or its subdags.
:raises: AirflowDagDuplicatedIdException if this dag or its subdags already exists in the bag.
"""
self._bag_dag(dag=dag, root_dag=root_dag, recursive=True)
def _bag_dag(self, *, dag, root_dag, recursive):
"""Actual implementation of bagging a dag.
        The only purpose of this is to avoid exposing ``recursive`` in ``bag_dag()``;
        the flag is intended to be used only by the ``_bag_dag()`` implementation itself.
"""
check_cycle(dag) # throws if a task cycle is found
dag.resolve_template_files()
dag.last_loaded = timezone.utcnow()
# Check policies
settings.dag_policy(dag)
for task in dag.tasks:
settings.task_policy(task)
subdags = dag.subdags
try:
# DAG.subdags automatically performs DFS search, so we don't recurse
# into further _bag_dag() calls.
if recursive:
for subdag in subdags:
subdag.fileloc = dag.fileloc
subdag.parent_dag = dag
self._bag_dag(dag=subdag, root_dag=root_dag, recursive=False)
prev_dag = self.dags.get(dag.dag_id)
if prev_dag and prev_dag.fileloc != dag.fileloc:
raise AirflowDagDuplicatedIdException(
dag_id=dag.dag_id,
incoming=dag.fileloc,
existing=self.dags[dag.dag_id].fileloc,
)
self.dags[dag.dag_id] = dag
self.log.debug("Loaded DAG %s", dag)
except (AirflowDagCycleException, AirflowDagDuplicatedIdException):
# There was an error in bagging the dag. Remove it from the list of dags
self.log.exception("Exception bagging dag: %s", dag.dag_id)
# Only necessary at the root level since DAG.subdags automatically
# performs DFS to search through all subdags
if recursive:
for subdag in subdags:
if subdag.dag_id in self.dags:
del self.dags[subdag.dag_id]
raise
def collect_dags(
self,
dag_folder: str | pathlib.Path | None = None,
only_if_updated: bool = True,
include_examples: bool = conf.getboolean("core", "LOAD_EXAMPLES"),
safe_mode: bool = conf.getboolean("core", "DAG_DISCOVERY_SAFE_MODE"),
):
"""
Given a file path or a folder, this method looks for python modules,
imports them and adds them to the dagbag collection.
Note that if a ``.airflowignore`` file is found while processing
the directory, it will behave much like a ``.gitignore``,
ignoring files that match any of the patterns specified
in the file.
**Note**: The patterns in ``.airflowignore`` are interpreted as either
un-anchored regexes or gitignore-like glob expressions, depending on
the ``DAG_IGNORE_FILE_SYNTAX`` configuration parameter.
"""
if self.read_dags_from_db:
return
self.log.info("Filling up the DagBag from %s", dag_folder)
dag_folder = dag_folder or self.dag_folder
# Used to store stats around DagBag processing
stats = []
# Ensure dag_folder is a str -- it may have been a pathlib.Path
dag_folder = correct_maybe_zipped(str(dag_folder))
for filepath in list_py_file_paths(
dag_folder,
safe_mode=safe_mode,
include_examples=include_examples,
):
try:
file_parse_start_dttm = timezone.utcnow()
found_dags = self.process_file(filepath, only_if_updated=only_if_updated, safe_mode=safe_mode)
file_parse_end_dttm = timezone.utcnow()
stats.append(
FileLoadStat(
file=filepath.replace(settings.DAGS_FOLDER, ""),
duration=file_parse_end_dttm - file_parse_start_dttm,
dag_num=len(found_dags),
task_num=sum(len(dag.tasks) for dag in found_dags),
dags=str([dag.dag_id for dag in found_dags]),
)
)
except Exception as e:
self.log.exception(e)
self.dagbag_stats = sorted(stats, key=lambda x: x.duration, reverse=True)
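    # Illustrative sketch (not part of the original source): what an
    # ``.airflowignore`` file in the dag folder might look like, assuming the
    # default regexp syntax described in the ``collect_dags`` docstring above.
    # Each non-empty line is an un-anchored regular expression; any file whose
    # path matches a pattern is skipped during collection.
    #
    #     project_a
    #     tenant_[\d]
    #
    # With these two lines, files such as ``project_a_dag_1.py`` or
    # ``tenant_1.py`` would not be parsed for DAGs.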
def collect_dags_from_db(self):
"""Collects DAGs from database."""
from airflow.models.serialized_dag import SerializedDagModel
with Stats.timer("collect_db_dags"):
self.log.info("Filling up the DagBag from database")
# The dagbag contains all rows in serialized_dag table. Deleted DAGs are deleted
# from the table by the scheduler job.
self.dags = SerializedDagModel.read_all_dags()
# Adds subdags.
# DAG post-processing steps such as self.bag_dag and croniter are not needed as
# they are done by scheduler before serialization.
subdags = {}
for dag in self.dags.values():
for subdag in dag.subdags:
subdags[subdag.dag_id] = subdag
self.dags.update(subdags)
def dagbag_report(self):
"""Prints a report around DagBag loading stats"""
stats = self.dagbag_stats
dag_folder = self.dag_folder
duration = sum((o.duration for o in stats), timedelta()).total_seconds()
dag_num = sum(o.dag_num for o in stats)
task_num = sum(o.task_num for o in stats)
table = tabulate(stats, headers="keys")
report = textwrap.dedent(
f"""\n
-------------------------------------------------------------------
DagBag loading stats for {dag_folder}
-------------------------------------------------------------------
Number of DAGs: {dag_num}
Total task number: {task_num}
DagBag parsing time: {duration}
{table}
"""
)
return report
@provide_session
def sync_to_db(self, processor_subdir: str | None = None, session: Session = None):
"""Save attributes about list of DAG to the DB."""
# To avoid circular import - airflow.models.dagbag -> airflow.models.dag -> airflow.models.dagbag
from airflow.models.dag import DAG
from airflow.models.serialized_dag import SerializedDagModel
def _serialize_dag_capturing_errors(dag, session):
"""
Try to serialize the dag to the DB, but make a note of any errors.
            We can't place them directly in import_errors, as this may be retried, and succeed the next time.
"""
if dag.is_subdag:
return []
try:
# We can't use bulk_write_to_db as we want to capture each error individually
dag_was_updated = SerializedDagModel.write_dag(
dag,
min_update_interval=settings.MIN_SERIALIZED_DAG_UPDATE_INTERVAL,
session=session,
)
if dag_was_updated:
self._sync_perm_for_dag(dag, session=session)
return []
except OperationalError:
raise
except Exception:
self.log.exception("Failed to write serialized DAG: %s", dag.full_filepath)
return [(dag.fileloc, traceback.format_exc(limit=-self.dagbag_import_error_traceback_depth))]
# Retry 'DAG.bulk_write_to_db' & 'SerializedDagModel.bulk_sync_to_db' in case
# of any Operational Errors
# In case of failures, provide_session handles rollback
for attempt in run_with_db_retries(logger=self.log):
with attempt:
serialize_errors = []
self.log.debug(
"Running dagbag.sync_to_db with retries. Try %d of %d",
attempt.retry_state.attempt_number,
MAX_DB_RETRIES,
)
self.log.debug("Calling the DAG.bulk_sync_to_db method")
try:
# Write Serialized DAGs to DB, capturing errors
for dag in self.dags.values():
serialize_errors.extend(_serialize_dag_capturing_errors(dag, session))
DAG.bulk_write_to_db(
self.dags.values(), processor_subdir=processor_subdir, session=session
)
except OperationalError:
session.rollback()
raise
# Only now we are "complete" do we update import_errors - don't want to record errors from
# previous failed attempts
self.import_errors.update(dict(serialize_errors))
@provide_session
def _sync_perm_for_dag(self, dag, session: Session = None):
"""Sync DAG specific permissions, if necessary"""
from airflow.security.permissions import DAG_ACTIONS, resource_name_for_dag
from airflow.www.fab_security.sqla.models import Action, Permission, Resource
root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id
def needs_perms(dag_id: str) -> bool:
dag_resource_name = resource_name_for_dag(dag_id)
for permission_name in DAG_ACTIONS:
if not (
session.query(Permission)
.join(Action)
.join(Resource)
.filter(Action.name == permission_name)
.filter(Resource.name == dag_resource_name)
.one_or_none()
):
return True
return False
if dag.access_control or needs_perms(root_dag_id):
self.log.debug("Syncing DAG permissions: %s to the DB", root_dag_id)
from airflow.www.security import ApplessAirflowSecurityManager
security_manager = ApplessAirflowSecurityManager(session=session)
security_manager.sync_perm_for_dag(root_dag_id, dag.access_control)
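# Illustrative sketch, not part of the original module: a minimal, hypothetical
# way to build a DagBag from a folder and look up a single DAG by id. The folder
# path and dag id below are made-up placeholders.
def _example_dagbag_usage():
    dagbag = DagBag(dag_folder="/path/to/dags", include_examples=False)
    # import_errors maps file path -> error message/traceback collected while parsing
    errors = dagbag.import_errors
    dag = dagbag.get_dag("example_dag_id")
    return dag, errors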
| {
"content_hash": "3fc6e0ff039970ea88536378b0926a13",
"timestamp": "",
"source": "github",
"line_count": 679,
"max_line_length": 110,
"avg_line_length": 41.359351988217966,
"alnum_prop": 0.5827724958159741,
"repo_name": "apache/airflow",
"id": "8f11d7996f6d573be06507ef3fdefcd1c1147473",
"size": "28870",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/models/dagbag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.25
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1EndpointSlice(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'address_type': 'str',
'api_version': 'str',
'endpoints': 'list[V1Endpoint]',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'ports': 'list[DiscoveryV1EndpointPort]'
}
attribute_map = {
'address_type': 'addressType',
'api_version': 'apiVersion',
'endpoints': 'endpoints',
'kind': 'kind',
'metadata': 'metadata',
'ports': 'ports'
}
def __init__(self, address_type=None, api_version=None, endpoints=None, kind=None, metadata=None, ports=None, local_vars_configuration=None): # noqa: E501
"""V1EndpointSlice - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._address_type = None
self._api_version = None
self._endpoints = None
self._kind = None
self._metadata = None
self._ports = None
self.discriminator = None
self.address_type = address_type
if api_version is not None:
self.api_version = api_version
self.endpoints = endpoints
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if ports is not None:
self.ports = ports
@property
def address_type(self):
"""Gets the address_type of this V1EndpointSlice. # noqa: E501
addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name. # noqa: E501
:return: The address_type of this V1EndpointSlice. # noqa: E501
:rtype: str
"""
return self._address_type
@address_type.setter
def address_type(self, address_type):
"""Sets the address_type of this V1EndpointSlice.
addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name. # noqa: E501
:param address_type: The address_type of this V1EndpointSlice. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and address_type is None: # noqa: E501
raise ValueError("Invalid value for `address_type`, must not be `None`") # noqa: E501
self._address_type = address_type
@property
def api_version(self):
"""Gets the api_version of this V1EndpointSlice. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1EndpointSlice. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1EndpointSlice.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1EndpointSlice. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def endpoints(self):
"""Gets the endpoints of this V1EndpointSlice. # noqa: E501
endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints. # noqa: E501
:return: The endpoints of this V1EndpointSlice. # noqa: E501
:rtype: list[V1Endpoint]
"""
return self._endpoints
@endpoints.setter
def endpoints(self, endpoints):
"""Sets the endpoints of this V1EndpointSlice.
endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints. # noqa: E501
:param endpoints: The endpoints of this V1EndpointSlice. # noqa: E501
:type: list[V1Endpoint]
"""
if self.local_vars_configuration.client_side_validation and endpoints is None: # noqa: E501
raise ValueError("Invalid value for `endpoints`, must not be `None`") # noqa: E501
self._endpoints = endpoints
@property
def kind(self):
"""Gets the kind of this V1EndpointSlice. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1EndpointSlice. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1EndpointSlice.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1EndpointSlice. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1EndpointSlice. # noqa: E501
:return: The metadata of this V1EndpointSlice. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1EndpointSlice.
:param metadata: The metadata of this V1EndpointSlice. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def ports(self):
"""Gets the ports of this V1EndpointSlice. # noqa: E501
ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports. # noqa: E501
:return: The ports of this V1EndpointSlice. # noqa: E501
:rtype: list[DiscoveryV1EndpointPort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""Sets the ports of this V1EndpointSlice.
ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports. # noqa: E501
:param ports: The ports of this V1EndpointSlice. # noqa: E501
:type: list[DiscoveryV1EndpointPort]
"""
self._ports = ports
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1EndpointSlice):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1EndpointSlice):
return True
return self.to_dict() != other.to_dict()
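# Illustrative sketch, not part of the generated client code: constructing a
# minimal slice and serializing it. "IPv4" comes from the documented address
# types; the empty endpoint list is only for brevity.
def _example_endpoint_slice():
    endpoint_slice = V1EndpointSlice(address_type="IPv4", endpoints=[])
    # to_dict() recursively converts nested models into plain dictionaries
    return endpoint_slice.to_dict()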
| {
"content_hash": "196580f8a264c56895eb0664559f2070",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 362,
"avg_line_length": 38.73461538461538,
"alnum_prop": 0.6366795750173766,
"repo_name": "kubernetes-client/python",
"id": "71283a866afac5b459bd56ee768b87ad87ddb791",
"size": "10088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1_endpoint_slice.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "356"
},
{
"name": "Python",
"bytes": "11454299"
},
{
"name": "Shell",
"bytes": "43108"
}
],
"symlink_target": ""
} |
__author__ = 'Christian Heinrich'
__copyright__ = 'Copyright 2014, Haveibeenpwned Project'
__credits__ = []
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'Christian Heinrich'
__email__ = '[email protected]'
__status__ = 'Development' | {
"content_hash": "e46b4bd7a99171393fc75cb6f1acb3cf",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 56,
"avg_line_length": 28.555555555555557,
"alnum_prop": 0.6459143968871596,
"repo_name": "cmlh/Maltego-Transforms",
"id": "77ed79e205d50b1e0e7206c09712423e5b0a3727",
"size": "280",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "canari/haveibeenpwned/src/haveibeenpwned/resources/etc/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13107"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0) | {
"content_hash": "7f941d517b11a132c8d02f1087b28f06",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 68,
"avg_line_length": 30.5,
"alnum_prop": 0.7494145199063232,
"repo_name": "jolahde/rak",
"id": "22f469e7c0319967a6b09bd76cdb7b428dea6feb",
"size": "427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polls/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1406"
},
{
"name": "HTML",
"bytes": "3372"
},
{
"name": "Python",
"bytes": "13208"
}
],
"symlink_target": ""
} |
from treap import Treap, PersistentTreap
from sys import getrefcount
t = (Treap(PersistentTreap(Treap([[1], [2]]))))
print(getrefcount(t[0]))
| {
"content_hash": "e000e9078a3e307e137e0d7f5271ed98",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 47,
"avg_line_length": 28.6,
"alnum_prop": 0.7342657342657343,
"repo_name": "DLunin/implicit-treap",
"id": "e290aa2defe12bc65248fce998086f4a58386992",
"size": "143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "33059"
},
{
"name": "Python",
"bytes": "143"
}
],
"symlink_target": ""
} |
import datetime
from email import utils
import test.support
import time
import unittest
import sys
import os.path
class DateTimeTests(unittest.TestCase):
datestring = 'Sun, 23 Sep 2001 20:10:55'
dateargs = (2001, 9, 23, 20, 10, 55)
offsetstring = ' -0700'
utcoffset = datetime.timedelta(hours=-7)
tz = datetime.timezone(utcoffset)
naive_dt = datetime.datetime(*dateargs)
aware_dt = datetime.datetime(*dateargs, tzinfo=tz)
def test_naive_datetime(self):
self.assertEqual(utils.format_datetime(self.naive_dt),
self.datestring + ' -0000')
def test_aware_datetime(self):
self.assertEqual(utils.format_datetime(self.aware_dt),
self.datestring + self.offsetstring)
def test_usegmt(self):
utc_dt = datetime.datetime(*self.dateargs,
tzinfo=datetime.timezone.utc)
self.assertEqual(utils.format_datetime(utc_dt, usegmt=True),
self.datestring + ' GMT')
def test_usegmt_with_naive_datetime_raises(self):
with self.assertRaises(ValueError):
utils.format_datetime(self.naive_dt, usegmt=True)
def test_usegmt_with_non_utc_datetime_raises(self):
with self.assertRaises(ValueError):
utils.format_datetime(self.aware_dt, usegmt=True)
def test_parsedate_to_datetime(self):
self.assertEqual(
utils.parsedate_to_datetime(self.datestring + self.offsetstring),
self.aware_dt)
def test_parsedate_to_datetime_naive(self):
self.assertEqual(
utils.parsedate_to_datetime(self.datestring + ' -0000'),
self.naive_dt)
class LocaltimeTests(unittest.TestCase):
def test_localtime_is_tz_aware_daylight_true(self):
test.support.patch(self, time, 'daylight', True)
t = utils.localtime()
self.assertIsNot(t.tzinfo, None)
def test_localtime_is_tz_aware_daylight_false(self):
test.support.patch(self, time, 'daylight', False)
t = utils.localtime()
self.assertIsNot(t.tzinfo, None)
def test_localtime_daylight_true_dst_false(self):
test.support.patch(self, time, 'daylight', True)
t0 = datetime.datetime(2012, 3, 12, 1, 1)
t1 = utils.localtime(t0, isdst=-1)
t2 = utils.localtime(t1)
self.assertEqual(t1, t2)
def test_localtime_daylight_false_dst_false(self):
test.support.patch(self, time, 'daylight', False)
t0 = datetime.datetime(2012, 3, 12, 1, 1)
t1 = utils.localtime(t0, isdst=-1)
t2 = utils.localtime(t1)
self.assertEqual(t1, t2)
def test_localtime_daylight_true_dst_true(self):
test.support.patch(self, time, 'daylight', True)
t0 = datetime.datetime(2012, 3, 12, 1, 1)
t1 = utils.localtime(t0, isdst=1)
t2 = utils.localtime(t1)
self.assertEqual(t1, t2)
def test_localtime_daylight_false_dst_true(self):
test.support.patch(self, time, 'daylight', False)
t0 = datetime.datetime(2012, 3, 12, 1, 1)
t1 = utils.localtime(t0, isdst=1)
t2 = utils.localtime(t1)
self.assertEqual(t1, t2)
@test.support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
def test_localtime_epoch_utc_daylight_true(self):
test.support.patch(self, time, 'daylight', True)
t0 = datetime.datetime(1990, 1, 1, tzinfo = datetime.timezone.utc)
t1 = utils.localtime(t0)
t2 = t0 - datetime.timedelta(hours=5)
t2 = t2.replace(tzinfo = datetime.timezone(datetime.timedelta(hours=-5)))
self.assertEqual(t1, t2)
@test.support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
def test_localtime_epoch_utc_daylight_false(self):
test.support.patch(self, time, 'daylight', False)
t0 = datetime.datetime(1990, 1, 1, tzinfo = datetime.timezone.utc)
t1 = utils.localtime(t0)
t2 = t0 - datetime.timedelta(hours=5)
t2 = t2.replace(tzinfo = datetime.timezone(datetime.timedelta(hours=-5)))
self.assertEqual(t1, t2)
def test_localtime_epoch_notz_daylight_true(self):
test.support.patch(self, time, 'daylight', True)
t0 = datetime.datetime(1990, 1, 1)
t1 = utils.localtime(t0)
t2 = utils.localtime(t0.replace(tzinfo=None))
self.assertEqual(t1, t2)
def test_localtime_epoch_notz_daylight_false(self):
test.support.patch(self, time, 'daylight', False)
t0 = datetime.datetime(1990, 1, 1)
t1 = utils.localtime(t0)
t2 = utils.localtime(t0.replace(tzinfo=None))
self.assertEqual(t1, t2)
# XXX: Need a more robust test for Olson's tzdata
@unittest.skipIf(sys.platform.startswith('win'),
"Windows does not use Olson's TZ database")
@unittest.skipUnless(os.path.exists('/usr/share/zoneinfo') or
os.path.exists('/usr/lib/zoneinfo'),
"Can't find the Olson's TZ database")
@test.support.run_with_tz('Europe/Kiev')
def test_variable_tzname(self):
t0 = datetime.datetime(1984, 1, 1, tzinfo=datetime.timezone.utc)
t1 = utils.localtime(t0)
self.assertEqual(t1.tzname(), 'MSK')
t0 = datetime.datetime(1994, 1, 1, tzinfo=datetime.timezone.utc)
t1 = utils.localtime(t0)
self.assertEqual(t1.tzname(), 'EET')
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "7cfadcdc51181198ba5ae19ba9bdf41a",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 81,
"avg_line_length": 38.75714285714286,
"alnum_prop": 0.6269812016218209,
"repo_name": "olemis/brython",
"id": "e507dd2c03a4a127bbb6f3cde434227f100faffa",
"size": "5426",
"binary": false,
"copies": "25",
"ref": "refs/heads/master",
"path": "www/src/Lib/test/test_email/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "15757"
},
{
"name": "Groff",
"bytes": "21080"
},
{
"name": "HTML",
"bytes": "4919603"
},
{
"name": "JavaScript",
"bytes": "4654888"
},
{
"name": "Makefile",
"bytes": "61"
},
{
"name": "Python",
"bytes": "14166957"
},
{
"name": "R",
"bytes": "2918"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
from trove.common import wsgi
from trove.common.auth import admin_context
from trove.extensions.mgmt.volume import models
from trove.extensions.mgmt.volume import views
from trove.openstack.common import log as logging
from trove.common.i18n import _
LOG = logging.getLogger(__name__)
class StorageController(wsgi.Controller):
"""Controller for storage device functionality."""
@admin_context
def index(self, req, tenant_id):
"""Return all storage devices."""
LOG.info(_("req : '%s'\n\n") % req)
LOG.info(_("Indexing storage info for tenant '%s'") % tenant_id)
context = req.environ[wsgi.CONTEXT_KEY]
storages = models.StorageDevices.load(context)
return wsgi.Result(views.StoragesView(storages).data(), 200)
| {
"content_hash": "c1e31606fd910f38c0703ddab5828967",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 72,
"avg_line_length": 36.80952380952381,
"alnum_prop": 0.703751617076326,
"repo_name": "CMSS-BCRDB/RDS",
"id": "3359871f3d902a55b6aa2c7a9ab0259142c225d3",
"size": "1410",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "trove/extensions/mgmt/volume/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "88"
},
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60526"
},
{
"name": "Python",
"bytes": "2811396"
},
{
"name": "Shell",
"bytes": "4771"
},
{
"name": "XSLT",
"bytes": "50542"
}
],
"symlink_target": ""
} |
from twiki.factory import create_app
from tweepy import TweepError
from wikipedia import WikipediaException
import pytest
import json
import sys
PY3 = sys.version_info[0] > 2
try:
from unittest import mock
except ImportError:
import mock
def make_json(data):
if PY3:
return json.loads(data.decode())
else:
return json.loads(data)
@pytest.fixture(scope='session')
def app():
return create_app('test')
@pytest.yield_fixture
def client(app):
with app.test_client() as c:
yield c
def test_provide_no_tweet_search(client):
resp = client.get('/api/tweets/')
jsoned = make_json(resp.data)
assert 'No search term provided' == jsoned['msg']
assert resp.status_code == 400
def test_twitter_error_handling(client):
api = mock.Mock()
api.search.side_effect = TweepError('Whoops...')
with mock.patch('twiki.exts.twitter._api', api):
resp = client.get('/api/tweets/whoops')
jsoned = make_json(resp.data)
assert "Sorry, we're having trouble with Twitter right now." == jsoned['msg']
assert resp.status_code == 500
def test_process_twitter_response(client):
tweets = [mock.Mock(user=mock.Mock(screen_name='Fred'), text='Some Text', id_str='1')]
with mock.patch('twiki.app.twitter._search', return_value=tweets):
resp = client.get('/api/tweets/Fred')
jsoned = make_json(resp.data)
assert jsoned['tweets'] == [{'user': 'Fred', 'text': 'Some Text',
'url': 'https://twitter.com/Fred/status/1'}]
def test_provide_no_wiki_search(client):
resp = client.get('/api/titles/')
jsoned = make_json(resp.data)
assert 'No search term provided' == jsoned['msg']
assert resp.status_code == 400
def test_wiki_error_handling(client):
wiki = mock.Mock()
wiki.search.side_effect = WikipediaException('Whoops...')
with mock.patch('twiki.exts.wiki._wikipedia', wiki):
resp = client.get('/api/titles/whoops')
jsoned = make_json(resp.data)
assert "Sorry, we're having trouble with Wikipedia right now." == jsoned['msg']
assert resp.status_code == 500
def test_process_wiki_response(client):
titles = ['Python']
with mock.patch('twiki.app.wiki._search', return_value=titles):
resp = client.get('/api/titles/Python')
jsoned = make_json(resp.data)
assert jsoned['titles'] == [{'title': 'Python', 'url': 'https://www.wikipedia.org/wiki/Python'}]
| {
"content_hash": "5dd156776327d9b5f31c50090d031f6e",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 100,
"avg_line_length": 24.87878787878788,
"alnum_prop": 0.6500203004466099,
"repo_name": "justanr/twiki",
"id": "b0d167a77f20c4e65f1e1c0720131a45feb15240",
"size": "2463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6751"
},
{
"name": "JavaScript",
"bytes": "2896"
},
{
"name": "Python",
"bytes": "16948"
}
],
"symlink_target": ""
} |
import numpy as np
from .base import LinearClassifierMixin
from ..feature_selection.selector_mixin import SelectorMixin
from ..svm.base import BaseLibLinear
class LogisticRegression(BaseLibLinear, LinearClassifierMixin, SelectorMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses a one-vs.-all (OvA)
scheme, rather than the "true" multinomial LR.
This class implements L1 and L2 regularized logistic regression using the
`liblinear` library. It can handle both dense and sparse input. Use
C-ordered arrays or CSR matrices containing 64-bit floats for optimal
performance; any other input format will be converted (and copied).
Parameters
----------
penalty : string, 'l1' or 'l2'
Used to specify the norm used in the penalization.
dual : boolean
Dual or primal formulation. Dual formulation is only
implemented for l2 penalty. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
        Inverse of regularization strength: the smaller the value, the
        stronger the regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
intercept_scaling : float, default: 1
        When self.fit_intercept is True, the instance vector x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight.
        Note! the synthetic feature weight is subject to l1/l2 regularization
        like all other features.
        To lessen the effect of regularization on the synthetic feature weight
        (and therefore on the intercept), intercept_scaling has to be increased.
class_weight : {dict, 'auto'}, optional
        Set the parameter C of class i to class_weight[i]*C. If not given,
        all classes are assumed to have weight one. The 'auto' mode uses
        the values of y to
automatically adjust weights inversely proportional to
class frequencies.
    tol : float, optional
Tolerance for stopping criteria.
Attributes
----------
`coef_` : array, shape = [n_classes-1, n_features]
Coefficient of the features in the decision function.
`coef_` is readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
`intercept_` : array, shape = [n_classes-1]
Intercept (a.k.a. bias) added to the decision function.
It is available only when parameter intercept is set to True.
See also
--------
LinearSVC
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
References:
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None):
super(LogisticRegression, self).__init__(
penalty=penalty, dual=dual, loss='lr', tol=tol, C=C,
fit_intercept=fit_intercept, intercept_scaling=intercept_scaling,
            class_weight=class_weight, random_state=random_state)
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
# 1. / (1. + np.exp(-scores)), computed in-place
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(prob.shape) == 1:
return np.vstack([1 - prob, prob]).T
else:
# OvR, not softmax, like Liblinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
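# Illustrative sketch, not part of the library: fitting the classifier on a tiny
# made-up two-class problem and reading per-class probabilities.
def _example_logistic_regression():
    X = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
    y = np.array([0, 0, 1, 1])
    clf = LogisticRegression(C=1.0, penalty='l2')
    clf.fit(X, y)
    # One row per sample, one column per class, ordered as in clf.classes_
    return clf.predict_proba(X)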
| {
"content_hash": "5f4a6c5193790504ceeb9124d20150b6",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 78,
"avg_line_length": 37.28082191780822,
"alnum_prop": 0.6428440198419989,
"repo_name": "lucidfrontier45/scikit-learn",
"id": "71f63edf74b86dbabeac3c34c9bde9bb2bd7898b",
"size": "5443",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/linear_model/logistic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "10562935"
},
{
"name": "C++",
"bytes": "496247"
},
{
"name": "JavaScript",
"bytes": "4775"
},
{
"name": "Python",
"bytes": "3594152"
},
{
"name": "Shell",
"bytes": "687"
}
],
"symlink_target": ""
} |
from botocore.exceptions import ClientError
from botocore.utils import get_service_module_name
class BaseClientExceptions(object):
ClientError = ClientError
def __init__(self, code_to_exception):
"""Base class for exceptions object on a client
:type code_to_exception: dict
:param code_to_exception: Mapping of error codes (strings) to exception
class that should be raised when encountering a particular
error code.
"""
self._code_to_exception = code_to_exception
def from_code(self, error_code):
"""Retrieves the error class based on the error code
        This is helpful for identifying the exception class that needs to be
        caught based on the ClientError.response['Error']['Code'] value
:type error_code: string
:param error_code: The error code associated to a ClientError exception
:rtype: ClientError or a subclass of ClientError
:returns: The appropriate modeled exception class for that error
code. If the error code does not match any of the known
modeled exceptions then return a generic ClientError.
"""
return self._code_to_exception.get(error_code, self.ClientError)
def __getattr__(self, name):
exception_cls_names = [
exception_cls.__name__ for exception_cls
in self._code_to_exception.values()
]
raise AttributeError(
'%r object has no attribute %r. Valid exceptions are: %s' % (
self, name, ', '.join(exception_cls_names)))
class ClientExceptionsFactory(object):
def __init__(self):
self._client_exceptions_cache = {}
def create_client_exceptions(self, service_model):
"""Creates a ClientExceptions object for the particular service client
:type service_model: botocore.model.ServiceModel
:param service_model: The service model for the client
:rtype: object that subclasses from BaseClientExceptions
:returns: The exceptions object of a client that can be used
to grab the various different modeled exceptions.
"""
service_name = service_model.service_name
if service_name not in self._client_exceptions_cache:
client_exceptions = self._create_client_exceptions(service_model)
self._client_exceptions_cache[service_name] = client_exceptions
return self._client_exceptions_cache[service_name]
def _create_client_exceptions(self, service_model):
cls_props = {}
code_to_exception = {}
for error_shape in service_model.error_shapes:
exception_name = str(error_shape.name)
exception_cls = type(exception_name, (ClientError,), {})
cls_props[exception_name] = exception_cls
code = str(error_shape.error_code)
code_to_exception[code] = exception_cls
cls_name = str(get_service_module_name(service_model) + 'Exceptions')
client_exceptions_cls = type(
cls_name, (BaseClientExceptions,), cls_props)
return client_exceptions_cls(code_to_exception)
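# Illustrative sketch, not part of botocore: how the exceptions object maps error
# codes to exception classes. The "NoSuchBucket" code and class below are made up
# purely for demonstration.
def _example_error_lookup():
    class NoSuchBucket(ClientError):
        pass
    exceptions = BaseClientExceptions({'NoSuchBucket': NoSuchBucket})
    assert exceptions.from_code('NoSuchBucket') is NoSuchBucket
    # Unknown codes fall back to the generic ClientError
    assert exceptions.from_code('UnknownCode') is ClientError
    return exceptions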
| {
"content_hash": "e11d9d261fcd599e16827e866afa6d5f",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 79,
"avg_line_length": 41.6578947368421,
"alnum_prop": 0.6541377132027796,
"repo_name": "pplu/botocore",
"id": "b192a66dedba286fc48539182dd75680186365a4",
"size": "3727",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "botocore/errorfactory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "23824"
},
{
"name": "Python",
"bytes": "2691062"
}
],
"symlink_target": ""
} |
import calendar
import logging
import json
import uuid
from itertools import chain
from django.contrib.auth import models as auth_models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.urlresolvers import reverse
from django.db import connection, models, transaction
from django.db.models import Q
from django.utils.translation import ugettext as _, ugettext_lazy as _t
from desktop import appmanager
from desktop.lib.i18n import force_unicode
from desktop.lib.exceptions_renderable import PopupException
LOG = logging.getLogger(__name__)
SAMPLE_USERNAME = 'sample'
class UserPreferences(models.Model):
"""Holds arbitrary key/value strings."""
user = models.ForeignKey(auth_models.User)
key = models.CharField(max_length=20)
value = models.TextField(max_length=4096)
class Settings(models.Model):
collect_usage = models.BooleanField(db_index=True, default=True)
tours_and_tutorials = models.BooleanField(db_index=True, default=True)
@classmethod
def get_settings(cls):
settings, created = Settings.objects.get_or_create(id=1)
return settings
class DocumentTagManager(models.Manager):
def get_tags(self, user):
return self.filter(owner=user).distinct()
def create_tag(self, owner, tag_name):
if tag_name in DocumentTag.RESERVED:
raise Exception(_("Can't add %s: it is a reserved tag.") % tag_name)
else:
tag, created = DocumentTag.objects.get_or_create(tag=tag_name, owner=owner)
return tag
def _get_tag(self, user, name):
tag, created = DocumentTag.objects.get_or_create(owner=user, tag=name)
return tag
def get_default_tag(self, user):
return self._get_tag(user, DocumentTag.DEFAULT)
def get_trash_tag(self, user):
return self._get_tag(user, DocumentTag.TRASH)
def get_history_tag(self, user):
return self._get_tag(user, DocumentTag.HISTORY)
def get_example_tag(self, user):
return self._get_tag(user, DocumentTag.EXAMPLE)
def tag(self, owner, doc_id, tag_name='', tag_id=None):
try:
tag = DocumentTag.objects.get(id=tag_id, owner=owner)
if tag.tag in DocumentTag.RESERVED:
raise Exception(_("Can't add %s: it is a reserved tag.") % tag)
except DocumentTag.DoesNotExist:
tag = self._get_tag(user=owner, name=tag_name)
doc = Document.objects.get_doc(doc_id, owner)
doc.add_tag(tag)
return tag
def untag(self, tag_id, owner, doc_id):
tag = DocumentTag.objects.get(id=tag_id, owner=owner)
if tag.tag in DocumentTag.RESERVED:
raise Exception(_("Can't remove %s: it is a reserved tag.") % tag)
doc = Document.objects.get_doc(doc_id, owner=owner)
doc.can_write_or_exception(owner)
doc.remove_tag(tag)
def delete_tag(self, tag_id, owner):
tag = DocumentTag.objects.get(id=tag_id, owner=owner)
default_tag = DocumentTag.objects.get_default_tag(owner)
if tag.tag in DocumentTag.RESERVED:
raise Exception(_("Can't remove %s: it is a reserved tag.") % tag)
else:
tag.delete()
for doc in Document.objects.get_docs(owner).filter(tags=None):
doc.add_tag(default_tag)
def update_tags(self, owner, doc_id, tag_ids):
doc = Document.objects.get_doc(doc_id, owner)
doc.can_write_or_exception(owner)
for tag in doc.tags.all():
if tag.tag not in DocumentTag.RESERVED:
doc.remove_tag(tag)
for tag_id in tag_ids:
tag = DocumentTag.objects.get(id=tag_id, owner=owner)
if tag.tag not in DocumentTag.RESERVED:
doc.add_tag(tag)
return doc
class DocumentTag(models.Model):
"""
Reserved tags can't be manually removed by the user.
"""
owner = models.ForeignKey(auth_models.User, db_index=True)
tag = models.SlugField()
DEFAULT = 'default' # Always there
TRASH = 'trash' # There when the document is trashed
HISTORY = 'history' # There when the document is a submission history
EXAMPLE = 'example' # Hue examples
RESERVED = (DEFAULT, TRASH, HISTORY, EXAMPLE)
objects = DocumentTagManager()
class Meta:
unique_together = ('owner', 'tag')
def __unicode__(self):
return force_unicode('%s') % (self.tag,)
class DocumentManager(models.Manager):
def documents(self, user):
return Document.objects.filter(
Q(owner=user) |
Q(documentpermission__users=user) |
Q(documentpermission__groups__in=user.groups.all())
).defer('description', 'extra').distinct()
def get_docs(self, user, model_class=None, extra=None):
docs = Document.objects.documents(user).exclude(name='pig-app-hue-script')
if model_class is not None:
ct = ContentType.objects.get_for_model(model_class)
docs = docs.filter(content_type=ct)
if extra is not None:
docs = docs.filter(extra=extra)
return docs
def get_doc(self, doc_id, user):
return Document.objects.documents(user).get(id=doc_id)
def trashed_docs(self, model_class, user):
tag = DocumentTag.objects.get_trash_tag(user=user)
return Document.objects.get_docs(user, model_class).filter(tags__in=[tag]).order_by('-last_modified')
def trashed(self, model_class, user):
docs = self.trashed_docs(model_class, user)
return [job.content_object for job in docs if job.content_object]
def available_docs(self, model_class, user, with_history=False):
exclude = [DocumentTag.objects.get_trash_tag(user=user)]
if not with_history:
exclude.append(DocumentTag.objects.get_history_tag(user=user))
return Document.objects.get_docs(user, model_class).exclude(tags__in=exclude).order_by('-last_modified')
def history_docs(self, model_class, user):
include = [DocumentTag.objects.get_history_tag(user=user)]
exclude = [DocumentTag.objects.get_trash_tag(user=user)]
return Document.objects.get_docs(user, model_class).filter(tags__in=include).exclude(tags__in=exclude).order_by('-last_modified')
def available(self, model_class, user, with_history=False):
docs = self.available_docs(model_class, user, with_history)
return [doc.content_object for doc in docs if doc.content_object]
def can_read_or_exception(self, user, doc_class, doc_id, exception_class=PopupException):
if doc_id is None:
return
try:
ct = ContentType.objects.get_for_model(doc_class)
doc = Document.objects.get(object_id=doc_id, content_type=ct)
if doc.can_read(user):
return doc
else:
message = _("Permission denied. %(username)s does not have the permissions required to access document %(id)s") % \
{'username': user.username, 'id': doc.id}
raise exception_class(message)
except Document.DoesNotExist:
raise exception_class(_('Document %(id)s does not exist') % {'id': doc_id})
def can_read(self, user, doc_class, doc_id):
ct = ContentType.objects.get_for_model(doc_class)
doc = Document.objects.get(object_id=doc_id, content_type=ct)
return doc.can_read(user)
def link(self, content_object, owner, name='', description='', extra=''):
if not content_object.doc.exists():
doc = Document.objects.create(
content_object=content_object,
owner=owner,
name=name,
description=description,
extra=extra
)
tag = DocumentTag.objects.get_default_tag(user=owner)
doc.tags.add(tag)
return doc
else:
LOG.warn('Object %s already has documents: %s' % (content_object, content_object.doc.all()))
return content_object.doc.all()[0]
def sync(self):
def find_jobs_with_no_doc(model):
return model.objects.filter(doc__isnull=True).select_related('owner')
try:
with transaction.atomic():
from oozie.models import Workflow, Coordinator, Bundle
for job in chain(
find_jobs_with_no_doc(Workflow),
find_jobs_with_no_doc(Coordinator),
find_jobs_with_no_doc(Bundle)):
doc = Document.objects.link(job, owner=job.owner, name=job.name, description=job.description)
if job.is_trashed:
doc.send_to_trash()
if job.is_shared:
doc.share_to_default()
if hasattr(job, 'managed'):
if not job.managed:
doc.extra = 'jobsub'
doc.save()
except Exception, e:
LOG.exception('error syncing oozie')
try:
with transaction.atomic():
from beeswax.models import SavedQuery
for job in find_jobs_with_no_doc(SavedQuery):
doc = Document.objects.link(job, owner=job.owner, name=job.name, description=job.desc, extra=job.type)
if job.is_trashed:
doc.send_to_trash()
except Exception, e:
LOG.exception('error syncing beeswax')
try:
with transaction.atomic():
from pig.models import PigScript
for job in find_jobs_with_no_doc(PigScript):
Document.objects.link(job, owner=job.owner, name=job.dict['name'], description='')
except Exception, e:
LOG.exception('error syncing pig')
try:
with transaction.atomic():
from search.models import Collection
for dashboard in Collection.objects.all():
col_dict = dashboard.properties_dict['collection']
if not 'uuid' in col_dict:
_uuid = str(uuid.uuid4())
col_dict['uuid'] = _uuid
dashboard.update_properties({'collection': col_dict})
if dashboard.owner is None:
from useradmin.models import install_sample_user
owner = install_sample_user()
else:
owner = dashboard.owner
dashboard_doc = Document2.objects.create(name=dashboard.label, uuid=_uuid, type='search-dashboard', owner=owner, description=dashboard.label, data=dashboard.properties)
Document.objects.link(dashboard_doc, owner=owner, name=dashboard.label, description=dashboard.label, extra='search-dashboard')
dashboard.save()
except Exception, e:
LOG.exception('error syncing search')
try:
with transaction.atomic():
for job in find_jobs_with_no_doc(Document2):
if job.type == 'oozie-workflow2':
extra = 'workflow2'
elif job.type == 'oozie-coordinator2':
extra = 'coordinator2'
elif job.type == 'oozie-bundle2':
extra = 'bundle2'
elif job.type == 'notebook':
extra = 'notebook'
elif job.type == 'search-dashboard':
extra = 'search-dashboard'
else:
extra = ''
doc = Document.objects.link(job, owner=job.owner, name=job.name, description=job.description, extra=extra)
except Exception, e:
LOG.exception('error syncing Document2')
    # Make sure each doc has at least one tag
try:
for doc in Document.objects.filter(tags=None):
default_tag = DocumentTag.objects.get_default_tag(doc.owner)
doc.tags.add(default_tag)
except Exception, e:
LOG.exception('error adding at least one tag to docs')
# Make sure all the sample user documents are shared.
try:
with transaction.atomic():
for doc in Document.objects.filter(owner__username=SAMPLE_USERNAME):
doc.share_to_default()
tag = DocumentTag.objects.get_example_tag(user=doc.owner)
doc.tags.add(tag)
doc.save()
except Exception, e:
LOG.exception('error sharing sample user documents')
# For now remove the default tag from the examples
try:
for doc in Document.objects.filter(tags__tag=DocumentTag.EXAMPLE):
default_tag = DocumentTag.objects.get_default_tag(doc.owner)
doc.tags.remove(default_tag)
except Exception, e:
LOG.exception('error removing default tags')
# ------------------------------------------------------------------------
LOG.info('Looking for documents that have no object')
# Delete documents with no object.
with transaction.atomic():
# First, delete all the documents that don't have a content type
docs = Document.objects.filter(content_type=None)
if docs:
LOG.info('Deleting %s doc(s) that do not have a content type' % docs.count())
docs.delete()
# Next, it's possible that there are documents pointing at a non-existing
# content_type. We need to do a left join to find these records, but we
      # can't do this directly in django. To get around writing raw SQL (which
# might not be portable), we'll use an aggregate to count up all the
# associated content_types, and delete the documents that have a count of
# zero.
#
# Note we're counting `content_type__name` to force the join.
docs = Document.objects \
.values('id') \
.annotate(content_type_count=models.Count('content_type__name')) \
.filter(content_type_count=0)
if docs:
LOG.info('Deleting %s doc(s) that have invalid content types' % docs.count())
docs.delete()
# Finally we need to delete documents with no associated content object.
# This is tricky because of our use of generic foreign keys. So to do
# this a bit more efficiently, we'll start with a query of all the
      # documents, then step through each content type and filter out all
# the documents it's referencing from our document query. Messy, but it
# works.
docs = Document.objects.all()
table_names = connection.introspection.table_names()
for content_type in ContentType.objects.all():
model_class = content_type.model_class()
# Ignore any types that don't have a model.
if model_class is None:
continue
# Ignore types that don't have a table yet.
if model_class._meta.db_table not in table_names:
continue
# Ignore classes that don't have a 'doc'.
if not hasattr(model_class, 'doc'):
continue
# First create a query that grabs all the document ids for this type.
docs_from_content = model_class.objects.values('doc__id')
# Next, filter these from our document query.
docs = docs.exclude(id__in=docs_from_content)
if docs.exists():
LOG.info('Deleting %s documents' % docs.count())
docs.delete()
UTC_TIME_FORMAT = "%Y-%m-%dT%H:%MZ"
class Document(models.Model):
owner = models.ForeignKey(auth_models.User, db_index=True, verbose_name=_t('Owner'), help_text=_t('User who can own the job.'), related_name='doc_owner')
name = models.CharField(default='', max_length=255)
description = models.TextField(default='')
last_modified = models.DateTimeField(auto_now=True, db_index=True, verbose_name=_t('Last modified'))
version = models.SmallIntegerField(default=1, verbose_name=_t('Schema version'))
extra = models.TextField(default='')
tags = models.ManyToManyField(DocumentTag, db_index=True)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
objects = DocumentManager()
class Meta:
unique_together = ('content_type', 'object_id')
def __unicode__(self):
return force_unicode('%s %s %s') % (self.content_type, self.name, self.owner)
def is_editable(self, user):
"""Deprecated by can_read"""
return self.can_write(user)
def can_edit_or_exception(self, user, exception_class=PopupException):
"""Deprecated by can_write_or_exception"""
return self.can_write_or_exception(user, exception_class)
def add_tag(self, tag):
self.tags.add(tag)
def remove_tag(self, tag):
self.tags.remove(tag)
def is_trashed(self):
return DocumentTag.objects.get_trash_tag(user=self.owner) in self.tags.all()
def is_historic(self):
return DocumentTag.objects.get_history_tag(user=self.owner) in self.tags.all()
def send_to_trash(self):
tag = DocumentTag.objects.get_trash_tag(user=self.owner)
self.tags.add(tag)
def restore_from_trash(self):
tag = DocumentTag.objects.get_trash_tag(user=self.owner)
self.tags.remove(tag)
def add_to_history(self):
tag = DocumentTag.objects.get_history_tag(user=self.owner)
self.tags.add(tag)
def remove_from_history(self):
tag = DocumentTag.objects.get_history_tag(user=self.owner)
self.tags.remove(tag)
def share_to_default(self, name='read'):
DocumentPermission.objects.share_to_default(self, name=name)
def can_read(self, user):
return user.is_superuser or self.owner == user or Document.objects.get_docs(user).filter(id=self.id).exists()
def can_write(self, user):
perm = self.list_permissions('write')
return user.is_superuser or self.owner == user or perm.groups.filter(id__in=user.groups.all()).exists() or user in perm.users.all()
def can_read_or_exception(self, user, exception_class=PopupException):
if self.can_read(user):
return True
else:
raise exception_class(_("Document does not exist or you don't have the permission to access it."))
def can_write_or_exception(self, user, exception_class=PopupException):
if self.can_write(user):
return True
else:
raise exception_class(_("Document does not exist or you don't have the permission to access it."))
def copy(self, content_object, **kwargs):
if content_object:
copy_doc = self
for k, v in kwargs.iteritems():
if hasattr(copy_doc, k):
setattr(copy_doc, k, v)
copy_doc.pk = None
copy_doc.id = None
copy_doc = Document.objects.link(content_object,
owner=copy_doc.owner,
name=copy_doc.name,
description=copy_doc.description,
extra=copy_doc.extra)
# Update reverse Document relation to new copy
if content_object.doc.get():
content_object.doc.get().delete()
content_object.doc.add(copy_doc)
return copy_doc
else:
raise PopupException(_("Document copy method requires a content_object argument."))
@property
def icon(self):
apps = appmanager.get_apps_dict()
try:
if self.extra == 'workflow2':
return staticfiles_storage.url('oozie/art/icon_oozie_workflow_48.png')
elif self.extra == 'coordinator2':
return staticfiles_storage.url('oozie/art/icon_oozie_coordinator_48.png')
elif self.extra == 'bundle2':
return staticfiles_storage.url('oozie/art/icon_oozie_bundle_48.png')
elif self.extra == 'notebook':
return staticfiles_storage.url('spark/art/icon_spark_48.png')
elif self.extra.startswith('search'):
return staticfiles_storage.url('search/art/icon_search_48.png')
elif self.content_type.app_label == 'beeswax':
if self.extra == '0':
return staticfiles_storage.url(apps['beeswax'].icon_path)
elif self.extra == '3':
return staticfiles_storage.url(apps['spark'].icon_path)
else:
return staticfiles_storage.url(apps['impala'].icon_path)
elif self.content_type.app_label == 'oozie':
if self.extra == 'jobsub':
return staticfiles_storage.url(apps['jobsub'].icon_path)
else:
return staticfiles_storage.url(self.content_type.model_class().ICON)
elif self.content_type.app_label in apps:
return staticfiles_storage.url(apps[self.content_type.app_label].icon_path)
else:
return staticfiles_storage.url('desktop/art/icon_hue_48.png')
except Exception, e:
LOG.warn(force_unicode(e))
return staticfiles_storage.url('desktop/art/icon_hue_48.png')
def share(self, users, groups, name='read'):
    DocumentPermission.objects.update(document=self, name=name, users=users, groups=groups, add=True)
def unshare(self, users, groups, name='read'):
    DocumentPermission.objects.update(document=self, name=name, users=users, groups=groups, add=False)
def sync_permissions(self, perms_dict):
"""
Set who else or which other group can interact with the document.
Example of input: {'read': {'user_ids': [1, 2, 3], 'group_ids': [1, 2, 3]}}
"""
for name, perm in perms_dict.iteritems():
users = groups = None
if perm.get('user_ids'):
users = auth_models.User.objects.in_bulk(perm.get('user_ids'))
else:
users = []
if perm.get('group_ids'):
groups = auth_models.Group.objects.in_bulk(perm.get('group_ids'))
else:
groups = []
DocumentPermission.objects.sync(document=self, name=name, users=users, groups=groups)
def list_permissions(self, perm='read'):
return DocumentPermission.objects.list(document=self, perm=perm)
def to_dict(self):
return {
'owner': self.owner.username,
'name': self.name,
'description': self.description,
'uuid': None, # no uuid == v1
'id': self.id,
'doc1_id': self.id,
'object_id': self.object_id,
'type': str(self.content_type),
'last_modified': self.last_modified.strftime(UTC_TIME_FORMAT),
'last_modified_ts': calendar.timegm(self.last_modified.utctimetuple()),
'isSelected': False
}
class DocumentPermissionManager(models.Manager):
def _check_perm(self, name):
perms = (DocumentPermission.READ_PERM, DocumentPermission.WRITE_PERM)
if name not in perms:
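      # Builds a readable list such as "read and write" for the error message below.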
perms_string = ' and '.join(', '.join(perms).rsplit(', ', 1))
raise PopupException(_('Only %s permissions are supported, not %s.') % (perms_string, name))
def share_to_default(self, document, name='read'):
from useradmin.models import get_default_user_group # Remove build dependency
self._check_perm(name)
if name == DocumentPermission.WRITE_PERM:
perm, created = DocumentPermission.objects.get_or_create(doc=document, perms=DocumentPermission.WRITE_PERM)
else:
perm, created = DocumentPermission.objects.get_or_create(doc=document, perms=DocumentPermission.READ_PERM)
default_group = get_default_user_group()
if default_group:
perm.groups.add(default_group)
def update(self, document, name='read', users=None, groups=None, add=True):
self._check_perm(name)
perm, created = DocumentPermission.objects.get_or_create(doc=document, perms=name)
if users is not None:
if add:
perm.users.add(*users)
else:
perm.users.remove(*users)
if groups is not None:
if add:
perm.groups.add(*groups)
else:
perm.groups.remove(*groups)
if not perm.users and not perm.groups:
perm.delete()
def sync(self, document, name='read', users=None, groups=None):
self._check_perm(name)
perm, created = DocumentPermission.objects.get_or_create(doc=document, perms=name)
if users is not None:
perm.users = []
perm.users = users
perm.save()
if groups is not None:
perm.groups = []
perm.groups = groups
perm.save()
if not users and not groups:
perm.delete()
def list(self, document, perm='read'):
perm, created = DocumentPermission.objects.get_or_create(doc=document, perms=perm)
return perm
class DocumentPermission(models.Model):
READ_PERM = 'read'
WRITE_PERM = 'write'
doc = models.ForeignKey(Document)
users = models.ManyToManyField(auth_models.User, db_index=True, db_table='documentpermission_users')
groups = models.ManyToManyField(auth_models.Group, db_index=True, db_table='documentpermission_groups')
perms = models.CharField(default=READ_PERM, max_length=10, choices=( # one perm
(READ_PERM, 'read'),
(WRITE_PERM, 'write'),
))
objects = DocumentPermissionManager()
class Meta:
unique_together = ('doc', 'perms')
class Document2Manager(models.Manager):
def get_by_natural_key(self, uuid, version, is_history):
return self.get(uuid=uuid, version=version, is_history=is_history)
def uuid_default():
return str(uuid.uuid4())
class Document2(models.Model):
owner = models.ForeignKey(auth_models.User, db_index=True, verbose_name=_t('Owner'), help_text=_t('Creator.'), related_name='doc2_owner')
name = models.CharField(default='', max_length=255)
description = models.TextField(default='')
uuid = models.CharField(default=uuid_default, max_length=36, db_index=True)
type = models.CharField(default='', max_length=32, db_index=True, help_text=_t('Type of document, e.g. Hive query, Oozie workflow, Search Dashboard...'))
data = models.TextField(default='{}')
extra = models.TextField(default='')
last_modified = models.DateTimeField(auto_now=True, db_index=True, verbose_name=_t('Time last modified'))
version = models.SmallIntegerField(default=1, verbose_name=_t('Document version'), db_index=True)
is_history = models.BooleanField(default=False, db_index=True)
tags = models.ManyToManyField('self', db_index=True)
dependencies = models.ManyToManyField('self', db_index=True)
doc = generic.GenericRelation(Document, related_name='doc_doc') # Compatibility with Hue 3
objects = Document2Manager()
class Meta:
unique_together = ('uuid', 'version', 'is_history')
def natural_key(self):
return (self.uuid, self.version, self.is_history)
@property
def data_dict(self):
if not self.data:
self.data = json.dumps({})
data_python = json.loads(self.data)
return data_python
def copy(self, **kwargs):
copy_doc = self
for k, v in kwargs.iteritems():
if hasattr(copy_doc, k):
setattr(copy_doc, k, v)
copy_doc.pk = None
copy_doc.id = None
copy_doc.uuid = str(uuid.uuid4())
copy_doc.save()
return copy_doc
def update_data(self, post_data):
data_dict = self.data_dict
data_dict.update(post_data)
self.data = json.dumps(data_dict)
def get_absolute_url(self):
if self.type == 'oozie-coordinator2':
return reverse('oozie:edit_coordinator') + '?coordinator=' + str(self.id)
elif self.type == 'oozie-bundle2':
return reverse('oozie:edit_bundle') + '?bundle=' + str(self.id)
elif self.type == 'notebook':
return reverse('spark:editor') + '?notebook=' + str(self.id)
elif self.type == 'search-dashboard':
return reverse('search:index') + '?collection=' + str(self.id)
else:
return reverse('oozie:edit_workflow') + '?workflow=' + str(self.id)
def to_dict(self):
return {
'owner': self.owner.username,
'name': self.name,
'description': self.description,
'uuid': self.uuid,
'id': self.id,
'doc1_id': self.doc.get().id if self.doc.exists() else -1,
'type': self.type,
'last_modified': self.last_modified.strftime(UTC_TIME_FORMAT),
'last_modified_ts': calendar.timegm(self.last_modified.utctimetuple()),
'isSelected': False,
'absoluteUrl': self.get_absolute_url()
}
def can_read_or_exception(self, user):
self.doc.get().can_read_or_exception(user)
| {
"content_hash": "7bccac595e0262e87375f5b9b622ffd6",
"timestamp": "",
"source": "github",
"line_count": 791,
"max_line_length": 180,
"avg_line_length": 34.29709228824273,
"alnum_prop": 0.6574882966567143,
"repo_name": "javachengwc/hue",
"id": "7b5f78cbea172d7c4f0259c6906be46cff85ea9d",
"size": "27921",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/src/desktop/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13685"
},
{
"name": "C",
"bytes": "2391760"
},
{
"name": "C++",
"bytes": "177090"
},
{
"name": "CSS",
"bytes": "423513"
},
{
"name": "Emacs Lisp",
"bytes": "12145"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Groff",
"bytes": "14877"
},
{
"name": "HTML",
"bytes": "21113316"
},
{
"name": "Java",
"bytes": "133906"
},
{
"name": "JavaScript",
"bytes": "2805608"
},
{
"name": "Makefile",
"bytes": "93726"
},
{
"name": "Mako",
"bytes": "2163111"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "32856304"
},
{
"name": "Scala",
"bytes": "159673"
},
{
"name": "Shell",
"bytes": "51345"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "126420"
},
{
"name": "Thrift",
"bytes": "101737"
},
{
"name": "XSLT",
"bytes": "357625"
}
],
"symlink_target": ""
} |
from bson.objectid import ObjectId
__author__ = 'Rafael'
class CategoryDAO:
def __init__(self, database):
self.db = database
self.categories = self.db.categories
def add(self, category):
try:
self.categories.insert_one(category)
except Exception as e:
print "Deu enrrosco na insercao! Mensagem de erro: " + e.message
return False
return True
def get_by_category_type(self, category_type):
return self.categories.find({'category_type':category_type})
def get(self, id):
try:
return self.categories.find_one({'_id': ObjectId(id)})
except Exception as e:
return "Erro ao buscar o item! Mensagem de erro: " + e.message
def get_by_id(self, category_id):
return self.categories.find_one({'_id': ObjectId(category_id)})
def remove(self, category_id):
try:
self.categories.remove({'_id': ObjectId(category_id)})
except Exception as e:
            print 'error during delete: ' + e.message
def update(self, category):
category_id = ObjectId(category['_id'])
del category['_id']
try:
self.categories.update_one({'_id': category_id}, {"$set": category}, upsert=False)
except Exception as e:
            print 'error during update: ' + e.message
@property
def get_all(self):
return self.categories.find({})
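# Illustrative usage sketch -- not part of the original module. It assumes a
# MongoDB instance reachable with pymongo's default connection settings and a
# database name ('financial') invented only for this example.
if __name__ == '__main__':
    from pymongo import MongoClient
    dao = CategoryDAO(MongoClient()['financial'])
    dao.add({'name': 'Groceries', 'category_type': 'expense'})
    for category in dao.get_all:
        print category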
| {
"content_hash": "66f187d114b296d5b1d9495e08312ffa",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 94,
"avg_line_length": 28.134615384615383,
"alnum_prop": 0.5864661654135338,
"repo_name": "rafaeljec/financialcontroller",
"id": "9b4cae6173e1e1dd3051b020f4916b258c2fe1b2",
"size": "1463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "categoryDAO.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "28412"
},
{
"name": "Python",
"bytes": "17773"
}
],
"symlink_target": ""
} |
from __future__ import division
import copy
import os.path
from vistrails.core.system import current_user, current_time
from vistrails.core.mashup.alias import Alias
from vistrails.core.mashup.component import Component
from vistrails.core.mashup.mashup import Mashup
class MashupController(object):
def __init__(self, originalController, vt_controller, vt_version, mshptrail=None):
self.vtController = vt_controller
self.originalController = originalController
self.vtVersion = vt_version
self.vtPipeline = self.vtController.vistrail.getPipeline(self.vtVersion)
self.vtPipeline.validate()
self.mshptrail = mshptrail
self.id_scope = mshptrail.id_scope
self.currentVersion = -1
self.currentMashup = None
self._changed = False
def setChanged(self, on):
self._changed = on
self.originalController.set_changed(True)
    def setCurrentVersion(self, version, quiet=True):
self.currentVersion = version
self.vtPipeline = self.vtController.vistrail.getPipeline(self.vtVersion)
if version > -1:
self.currentMashup = self.mshptrail.getMashup(version)
self.updatePipelineAliasesFromCurrentMashup()
def getVistrailParam(self, alias):
if self.vtPipeline:
return self.vtPipeline.db_get_object(alias.component.vttype,
alias.component.vtid)
return None
def execute(self, params):
if self.vtPipeline and self.vtController:
mashup_id = self.mshptrail.id
mashup_version = self.currentVersion
reason = "mashup::%s::%s"%(str(mashup_id), mashup_version)
result = self.vtController.execute_current_workflow(custom_params=params,
reason=reason)
self.originalController.set_changed(True)
return result
return ([], False)
def updateCurrentTag(self, name):
if self.mshptrail.changeTag(self.currentVersion, name, current_user(),
current_time()):
self.setChanged(True)
return True
else:
return False
def moveTag(self, from_version, to_version, name):
tag = self.mshptrail.getTagForActionId(from_version)
if tag:
self.mshptrail.removeTagByActionId(from_version)
self.mshptrail.addTag(to_version, tag, user=current_user(),
date=current_time())
def getCurrentTag(self):
return self.mshptrail.getTagForActionId(self.currentVersion)
def versionHasTag(self, version):
return self.mshptrail.hasTagForActionId(version)
def hasTagWithName(self, name):
return self.mshptrail.hasTagWithName(name)
def getVistrailName(self):
name = ''
locator = self.currentMashup.vtid
if locator is not None:
if locator.name is None:
name = ''
else:
name = os.path.split(locator.name)[1]
if name == '':
name = self.controller.vtController.name
return name
def resetVistrailPipeline(self):
self.vtController.change_selected_version(self.vtVersion)
def getVistrailWorkflowTag(self):
return self.vtController.get_pipeline_name(self.vtVersion)
def reorderAliases(self, new_order):
if self.currentMashup:
new_aliases = []
pos = 0
for old_pos in new_order:
alias = self.currentMashup.alias_list[old_pos].do_copy(
new_ids=True, id_scope=self.mshptrail.id_scope,
id_remap={})
alias.component.pos = pos
new_aliases.append(alias)
pos += 1
return self.createMashupVersion(new_aliases, quiet=False)
def updateAlias(self, alias):
"""updateAlias(alias)-> long
This will create a version with an alias change (can't be a position
        change). Position changes are taken care of in the reorderAliases method.
"""
#print " controller updateAlias ", alias
new_aliases = []
if self.currentMashup:
for a in self.currentMashup.alias_list:
if a.id != alias.id:
calias = a.do_copy(new_ids=True,
id_scope=self.mshptrail.id_scope,
id_remap={})
else:
#print "found alias: ", a
calias = alias.do_copy(new_ids=True,
id_scope=self.mshptrail.id_scope,
id_remap={})
new_aliases.append(calias)
return self.createMashupVersion(new_aliases, quiet=False)
def updateAliasFromParam(self, param):
add_alias = True
new_aliases = []
pos = 0
for alias in self.currentMashup.alias_list:
if alias.component.vtid != param.id:
calias = alias.do_copy(new_ids=True,
id_scope=self.mshptrail.id_scope,
id_remap={})
calias.component.pos = pos
new_aliases.append(calias)
pos += 1
else:
#print "found alias: ", alias
add_alias = False
if param.alias != '':
new_alias = alias.do_copy(new_ids=True,
id_scope=self.mshptrail.id_scope,
id_remap={})
new_alias.name = param.alias
new_aliases.append(new_alias)
pos += 1
if add_alias:
parameter = self.vtPipeline.db_get_object(param.dbtype, param.id)
cid = self.id_scope.getNewId('mashup_component')
aid = self.id_scope.getNewId('mashup_alias')
component = Component(cid, parameter.vtType,
parameter.real_id, param.parent_dbtype,
param.parent_id,
param.mId, parameter.type,
parameter.strValue, parameter.pos,
pos, "")
alias = Alias(aid, param.alias, component)
new_aliases.append(alias)
self.vtPipeline.add_alias(param.alias, param.type, param.id,
param.parent_dbtype, param.parent_id,
param.mId)
else:
self.vtPipeline.change_alias(param.alias, param.type, param.id,
param.parent_dbtype, param.parent_id,
param.mId)
return self.createMashupVersion(new_aliases, quiet=False)
def updateAliasesFromPipeline(self, pipeline):
"""updateAliasesFromPipeline(self, pipeline) -> long
This will generate a new mashup by updating the aliases of the current
mashup according to the aliases in a pipeline. This assumes that the
mashup's current aliases are different from pipeline aliases by at most
        one change (e.g., an alias rename, an alias addition, an alias removal)
"""
pip_aliases = pipeline.aliases.keys()
mashup_aliases = [a.name for a in self.currentMashup.alias_list]
new_aliases = []
if len(pip_aliases) == len(mashup_aliases):
#an alias probably changed its name or its value
old_a = None
new_a = None
for a in self.currentMashup.alias_list:
if a.name not in pip_aliases:
old_a = a.do_copy(new_ids=True,
id_scope=self.mshptrail.id_scope,
id_remap={})
new_aliases.append(old_a)
else:
new_aliases.append(a)
for a in pip_aliases:
if a not in mashup_aliases:
new_a = (a, pipeline.aliases[a])
if old_a is not None and new_a is not None:
(a, info) = new_a
parameter = pipeline.db_get_object(info[0],info[1])
old_a.name = a
old_a.component.vttype = parameter.vtType
old_a.component.vtid = parameter.real_id
old_a.component.vtparent_type = info[2]
old_a.component.vt_parent_id = info[3]
old_a.component.mid = info[4]
old_a.component.type = parameter.type
old_a.component.val = parameter.strValue
old_a.component.vtpos = parameter.pos
elif len(pip_aliases) < len(mashup_aliases):
# an alias was removed
pos = 0
for a in self.currentMashup.alias_list:
if a.name in pip_aliases:
alias = a.do_copy(new_ids=True,
id_scope=self.mshptrail.id_scope,
id_remap={})
alias.component.pos = pos
new_aliases.append(alias)
pos += 1
else:
#an alias was added
pos = len(mashup_aliases)
new_aliases = [a for a in self.currentMashup.alias_list]
for a in pip_aliases:
if a not in mashup_aliases:
info = pipeline.aliases[a]
parameter = pipeline.db_get_object(info[0],info[1])
cid = self.id_scope.getNewId('mashup_component')
aid = self.id_scope.getNewId('mashup_alias')
component = Component(cid, parameter.vtType,
parameter.real_id, info[2], info[3],
info[4], parameter.type,
parameter.strValue, parameter.pos,
pos, "")
alias = Alias(aid, a, component)
new_aliases.append(alias)
pos += 1
return self.createMashupVersion(new_aliases, quiet=False)
def updatePipelineAliasesFromCurrentMashup(self):
self.resetVistrailPipeline()
self.vtPipeline = copy.copy(self.vtController.current_pipeline)
#first we clear all aliases in pipeline
to_remove = self.vtPipeline.aliases.values()
for (type, oId, parentType, parentId, mid) in to_remove:
self.vtPipeline.remove_alias(type, oId, parentType, parentId, mid)
parameter = self.vtPipeline.db_get_object(type,oId)
parameter.alias = ''
#now we populate the pipeline according to the aliases in the mashup
for alias in self.currentMashup.alias_list:
self.vtPipeline.add_alias(alias.name, alias.component.vttype,
alias.component.vtid,
alias.component.vtparent_type,
alias.component.vtparent_id,
alias.component.vtmid)
parameter = self.vtPipeline.db_get_object(alias.component.vttype,
alias.component.vtid)
parameter.alias = alias.name
def getMashupName(self, version=-1):
action_map = self.mshptrail.actionMap
if version == -1:
version = self.currentVersion
count = 0
while True:
hasTag = self.mshptrail.hasTagForActionId(version)
if hasTag or version <= 1:
if hasTag:
name = self.mshptrail.getTagForActionId(version)
else:
name = "ROOT"
count_str = ""
if count > 0:
count_str = " + " + str(count)
return name + count_str
version = action_map[version].parent_id
count += 1
def findFirstTaggedParent(self, version):
action_map = self.mshptrail.actionMap
version = action_map[version].parent_id
while True:
hasTag = self.mshptrail.hasTagForActionId(version)
if hasTag or version <= 1:
name = ""
if hasTag:
name = self.mshptrail.getTagForActionId(version)
return (version, name)
version = action_map[version].parent_id
def removeAlias(self, name):
"""removeAlias(name: str) -> long
        This will create a new version of the mashup without the alias `name`,
        add it to the trail, and set that version as the current version. It
        returns the new version number.
"""
new_aliases = []
if self.currentMashup:
pos = 0
for alias in self.currentMashup.alias_list:
if alias.name != name:
calias = alias.do_copy(new_ids=True,
id_scope=self.mshptrail.id_scope,
id_remap={})
calias.component.pos = pos
new_aliases.append(calias)
pos += 1
return self.createMashupVersion(alias_list=new_aliases, quiet=False)
def createMashupVersion(self, alias_list, quiet=False):
id = self.id_scope.getNewId('mashup')
mashup = Mashup(id=id, name="mashup%s"%id,
vtid=self.currentMashup.vtid,
version=self.currentMashup.version,
alias_list=alias_list)
currVersion = self.mshptrail.addVersion(parent_id=self.currentVersion,
mashup=mashup,
user=current_user(),
date=current_time())
self.mshptrail.currentVersion = currVersion
self.currentMashup = mashup
#print "created new mashup ", currVersion
self.setCurrentVersion(currVersion, quiet)
self.setChanged(True)
return currVersion
| {
"content_hash": "20521e50b635a5635181466de8db6dea",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 86,
"avg_line_length": 44.53333333333333,
"alnum_prop": 0.5174877517691889,
"repo_name": "VisTrails/VisTrails",
"id": "2b376788f62d231af5dd9e7ca8ca9d75191348d4",
"size": "16609",
"binary": false,
"copies": "2",
"ref": "refs/heads/v2.2",
"path": "vistrails/core/mashup/controller.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1129"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19779006"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "SQLPL",
"bytes": "2323"
},
{
"name": "Shell",
"bytes": "26542"
},
{
"name": "TeX",
"bytes": "147247"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
import subprocess
import os
import os.path
import shlex
import sys
from subprocess import *
import shutil
import uuid
import glob
from common import DeviceItem
import traceback
class DiskUtil(object):
def __init__(self, patching, logger):
self.patching = patching
self.logger = logger
def get_device_items_property(self, lsblk_path, dev_name, property_name):
get_property_cmd = lsblk_path + " /dev/" + dev_name + " -b -nl -o NAME," + property_name
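        # Example command (illustrative): "lsblk /dev/sda1 -b -nl -o NAME,FSTYPE",
        # whose matching output line looks like "sda1 ext4"; the loop below returns
        # the second column for the requested device name.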
get_property_cmd_args = shlex.split(get_property_cmd)
get_property_cmd_p = Popen(get_property_cmd_args,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output,err = get_property_cmd_p.communicate()
lines = output.splitlines()
for i in range(0,len(lines)):
item_value_str = lines[i].strip()
if(item_value_str != ""):
disk_info_item_array = item_value_str.split()
if(dev_name == disk_info_item_array[0]):
if(len(disk_info_item_array) > 1):
return disk_info_item_array[1]
return None
def get_device_items_sles(self,dev_path):
self.logger.log("get_device_items_sles : getting the blk info from " + str(dev_path), True)
device_items = []
#first get all the device names
if(dev_path is None):
get_device_cmd = self.patching.lsblk_path + " -b -nl -o NAME"
else:
get_device_cmd = self.patching.lsblk_path + " -b -nl -o NAME " + dev_path
get_device_cmd_args = shlex.split(get_device_cmd)
p = Popen(get_device_cmd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out_lsblk_output, err = p.communicate()
lines = out_lsblk_output.splitlines()
for i in range(0,len(lines)):
item_value_str = lines[i].strip()
if(item_value_str != ""):
disk_info_item_array = item_value_str.split()
device_item = DeviceItem()
device_item.name = disk_info_item_array[0]
device_items.append(device_item)
for i in range(0,len(device_items)):
device_item = device_items[i]
device_item.file_system = self.get_device_items_property(lsblk_path=self.patching.lsblk_path,dev_name=device_item.name,property_name='FSTYPE')
device_item.mount_point = self.get_device_items_property(lsblk_path=self.patching.lsblk_path,dev_name=device_item.name,property_name='MOUNTPOINT')
device_item.label = self.get_device_items_property(lsblk_path=self.patching.lsblk_path,dev_name=device_item.name,property_name='LABEL')
device_item.uuid = self.get_device_items_property(lsblk_path=self.patching.lsblk_path,dev_name=device_item.name,property_name='UUID')
#get the type of device
model_file_path = '/sys/block/' + device_item.name + '/device/model'
if(os.path.exists(model_file_path)):
with open(model_file_path,'r') as f:
device_item.model = f.read().strip()
if(device_item.model == 'Virtual Disk'):
self.logger.log("model is virtual disk", True)
device_item.type = 'disk'
if(device_item.type != 'disk'):
partition_files = glob.glob('/sys/block/*/' + device_item.name + '/partition')
if(partition_files is not None and len(partition_files) > 0):
self.logger.log("partition files exists", True)
device_item.type = 'part'
return device_items
def get_device_items_from_lsblk_list(self, lsblk_path, dev_path):
self.logger.log("get_device_items_from_lsblk_list : getting the blk info from " + str(dev_path), True)
device_items = []
#first get all the device names
if(dev_path is None):
get_device_cmd = lsblk_path + " -b -nl -o NAME"
else:
get_device_cmd = lsblk_path + " -b -nl -o NAME " + dev_path
get_device_cmd_args = shlex.split(get_device_cmd)
p = Popen(get_device_cmd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out_lsblk_output, err = p.communicate()
lines = out_lsblk_output.splitlines()
device_items_temp = []
for i in range(0,len(lines)):
item_value_str = lines[i].strip()
if(item_value_str != ""):
disk_info_item_array = item_value_str.split()
device_item = DeviceItem()
device_item.name = disk_info_item_array[0]
device_items_temp.append(device_item)
for i in range(0,len(device_items_temp)):
device_item = device_items_temp[i]
device_item.mount_point = self.get_device_items_property(lsblk_path=lsblk_path,dev_name=device_item.name,property_name='MOUNTPOINT')
if (device_item.mount_point is not None):
device_item.file_system = self.get_device_items_property(lsblk_path=lsblk_path,dev_name=device_item.name,property_name='FSTYPE')
device_item.label = self.get_device_items_property(lsblk_path=lsblk_path,dev_name=device_item.name,property_name='LABEL')
device_item.uuid = self.get_device_items_property(lsblk_path=lsblk_path,dev_name=device_item.name,property_name='UUID')
device_item.type = self.get_device_items_property(lsblk_path=lsblk_path,dev_name=device_item.name,property_name='TYPE')
device_items.append(device_item)
self.logger.log("lsblk MOUNTPOINT=" + str(device_item.mount_point) + ", NAME=" + str(device_item.name) + ", TYPE=" + str(device_item.type) + ", FSTYPE=" + str(device_item.file_system) + ", LABEL=" + str(device_item.label) + ", UUID=" + str(device_item.uuid) + ", MODEL=" + str(device_item.model), True)
return device_items
def get_lsblk_pairs_output(self, lsblk_path, dev_path):
self.logger.log("get_lsblk_pairs_output : getting the blk info from " + str(dev_path) + " using lsblk_path " + str(lsblk_path), True)
out_lsblk_output = None
error_msg = None
is_lsblk_path_wrong = False
try:
if(dev_path is None):
p = Popen([str(lsblk_path), '-b', '-n','-P','-o','NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL,SIZE'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
p = Popen([str(lsblk_path), '-b', '-n','-P','-o','NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL,SIZE',dev_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception as e:
errMsg = 'Exception in lsblk command, error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.logger.log(errMsg, True, 'Error')
is_lsblk_path_wrong = True
if is_lsblk_path_wrong == False :
out_lsblk_output, err = p.communicate()
out_lsblk_output = str(out_lsblk_output)
error_msg = str(err)
if(error_msg is not None and error_msg.strip() != ""):
self.logger.log(str(err), True)
return is_lsblk_path_wrong, out_lsblk_output, error_msg
def get_which_command_result(self, program_to_locate):
self.logger.log("getting the which info for " + str(program_to_locate), True)
out_which_output = None
error_msg = None
try:
p = Popen(['which', str(program_to_locate)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out_which_output, err = p.communicate()
out_which_output = str(out_which_output)
error_msg = str(err)
if(error_msg is not None and error_msg.strip() != ""):
self.logger.log(str(err), True)
self.logger.log("which command result :" + str(out_which_output), True)
if (out_which_output is not None):
out_which_output = out_which_output.splitlines()[0]
except Exception as e:
errMsg = 'Exception in which command, error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.logger.log(errMsg, True, 'Error')
return out_which_output, error_msg
def get_device_items(self, dev_path):
if(self.patching.distro_info[0].lower() == 'suse' and self.patching.distro_info[1] == '11'):
return self.get_device_items_sles(dev_path)
else:
self.logger.log("getting the blk info from " + str(dev_path), True)
device_items = []
lsblk_path = self.patching.lsblk_path
# Get lsblk command output using lsblk_path as self.patching.lsblk_path
is_lsblk_path_wrong, out_lsblk_output, error_msg = self.get_lsblk_pairs_output(lsblk_path, dev_path)
            # if lsblk_path was wrong, use /bin/lsblk or /usr/bin/lsblk based on self.patching.usr_flag to get lsblk command output again for centos/redhat distros
if (is_lsblk_path_wrong == True) and (self.patching.distro_info[0].lower() == 'centos' or self.patching.distro_info[0].lower() == 'redhat'):
if self.patching.usr_flag == 1:
self.logger.log("lsblk path is wrong, removing /usr prefix", True, 'Warning')
lsblk_path = "/bin/lsblk"
else:
self.logger.log("lsblk path is wrong, adding /usr prefix", True, 'Warning')
lsblk_path = "/usr/bin/lsblk"
is_lsblk_path_wrong, out_lsblk_output, error_msg = self.get_lsblk_pairs_output(lsblk_path, dev_path)
            # if lsblk_path was still wrong, find lsblk_path using the "which" command
if (is_lsblk_path_wrong == True):
self.logger.log("lsblk path is wrong. finding path using which command", True, 'Warning')
out_which_output, which_error_msg = self.get_which_command_result('lsblk')
# get lsblk command output
if (out_which_output is not None):
lsblk_path = str(out_which_output)
is_lsblk_path_wrong, out_lsblk_output, error_msg = self.get_lsblk_pairs_output(lsblk_path, dev_path)
# if error_msg contains "invalid option", then get device_items using method get_device_items_from_lsblk_list
if (error_msg is not None and error_msg.strip() != "" and 'invalid option' in error_msg):
device_items = self.get_device_items_from_lsblk_list(lsblk_path, dev_path)
# else get device_items from parsing the lsblk command output
elif (out_lsblk_output is not None):
lines = out_lsblk_output.splitlines()
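                # Each line of the `lsblk -P` output is a series of KEY="value"
                # pairs, e.g. (illustrative):
                #   NAME="sda1" TYPE="part" FSTYPE="ext4" MOUNTPOINT="/" LABEL="" UUID="..." MODEL="" SIZE="..."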
for i in range(0,len(lines)):
item_value_str = lines[i].strip()
if(item_value_str != ""):
disk_info_item_array = item_value_str.split()
device_item = DeviceItem()
disk_info_item_array_length = len(disk_info_item_array)
for j in range(0, disk_info_item_array_length):
disk_info_property = disk_info_item_array[j]
property_item_pair = disk_info_property.split('=')
if(property_item_pair[0] == 'NAME'):
device_item.name = property_item_pair[1].strip('"')
if(property_item_pair[0] == 'TYPE'):
device_item.type = property_item_pair[1].strip('"')
if(property_item_pair[0] == 'FSTYPE'):
device_item.file_system = property_item_pair[1].strip('"')
if(property_item_pair[0] == 'MOUNTPOINT'):
device_item.mount_point = property_item_pair[1].strip('"')
if(property_item_pair[0] == 'LABEL'):
device_item.label = property_item_pair[1].strip('"')
if(property_item_pair[0] == 'UUID'):
device_item.uuid = property_item_pair[1].strip('"')
if(property_item_pair[0] == 'MODEL'):
device_item.model = property_item_pair[1].strip('"')
self.logger.log("lsblk MOUNTPOINT=" + str(device_item.mount_point) + ", NAME=" + str(device_item.name) + ", TYPE=" + str(device_item.type) + ", FSTYPE=" + str(device_item.file_system) + ", LABEL=" + str(device_item.label) + ", UUID=" + str(device_item.uuid) + ", MODEL=" + str(device_item.model), True)
if(device_item.mount_point is not None and device_item.mount_point != "" and device_item.mount_point != " "):
device_items.append(device_item)
return device_items
def get_mount_command_output(self, mount_path):
self.logger.log("getting the mount info using mount_path " + str(mount_path), True)
out_mount_output = None
error_msg = None
is_mount_path_wrong = False
try:
p = Popen([str(mount_path)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception as e:
errMsg = 'Exception in mount command, error: %s, stack trace: %s' % (str(e), traceback.format_exc())
self.logger.log(errMsg, True, 'Error')
is_mount_path_wrong = True
if is_mount_path_wrong == False :
out_mount_output, err = p.communicate()
out_mount_output = str(out_mount_output)
error_msg = str(err)
if(error_msg is not None and error_msg.strip() != ""):
self.logger.log(str(err), True)
return is_mount_path_wrong, out_mount_output, error_msg
def get_mount_points(self):
# Get the output on the mount command
self.logger.log("getting the mount-points info using mount command ", True)
mount_points = []
fs_types = []
mount_path = self.patching.mount_path
is_mount_path_wrong, out_mount_output, error_msg = self.get_mount_command_output(mount_path)
if (is_mount_path_wrong == True):
if self.patching.usr_flag == 1:
self.logger.log("mount path is wrong.removing /usr prefix", True, 'Warning')
mount_path = "/bin/mount"
else:
self.logger.log("mount path is wrong.Adding /usr prefix", True, 'Warning')
mount_path = "/usr/bin/mount"
is_mount_path_wrong, out_mount_output, error_msg = self.get_mount_command_output(mount_path)
        # if mount_path was still wrong, find mount_path using the "which" command
if (is_mount_path_wrong == True):
self.logger.log("mount path is wrong. finding path using which command", True, 'Warning')
out_which_output, which_error_msg = self.get_which_command_result('mount')
# get mount command output
if (out_which_output is not None):
mount_path = str(out_which_output)
is_mount_path_wrong, out_mount_output, error_msg = self.get_mount_command_output(mount_path)
if (out_mount_output is not None):
#Extract the list of mnt_point in order
lines = out_mount_output.splitlines()
for line in lines:
line = line.strip()
if(line != ""):
mountPrefixStr = " on /"
prefixIndex = line.find(mountPrefixStr)
if(prefixIndex >= 0):
mountpointStart = prefixIndex + len(mountPrefixStr) - 1
fstypePrefixStr = " type "
mountpointEnd = line.find(fstypePrefixStr, mountpointStart)
if(mountpointEnd >= 0):
mount_point = line[mountpointStart:mountpointEnd]
fs_type = ""
fstypeStart = line.find(fstypePrefixStr) + len(fstypePrefixStr) - 1
if(line.find(fstypePrefixStr) >= 0):
fstypeEnd = line.find(" ", fstypeStart+1)
if(fstypeEnd >=0):
fs_type = line[fstypeStart+1:fstypeEnd]
# If there is a duplicate, keep only the first instance
if(mount_point not in mount_points):
self.logger.log("mount command mount :" + str(mount_point) + ": and fstype :"+ str(fs_type) + ":", True)
fs_types.append(fs_type)
mount_points.append(mount_point)
return mount_points, fs_types
| {
"content_hash": "52e759e1dd8f062ed0e2da3f5dbcfb9a",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 326,
"avg_line_length": 59.13380281690141,
"alnum_prop": 0.5668095748481601,
"repo_name": "krkhan/azure-linux-extensions",
"id": "da5f6e952c5726a934f0923e6d1bf7c6e7d9ac30",
"size": "17431",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "VMBackup/main/DiskUtil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39379"
},
{
"name": "JavaScript",
"bytes": "22883"
},
{
"name": "Makefile",
"bytes": "4033"
},
{
"name": "PowerShell",
"bytes": "24124"
},
{
"name": "Python",
"bytes": "3896274"
},
{
"name": "Shell",
"bytes": "21864"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 5, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 0); | {
"content_hash": "f71e585f35ff8f2f5ac9b986f8e2c22d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 164,
"avg_line_length": 37.714285714285715,
"alnum_prop": 0.7045454545454546,
"repo_name": "antoinecarme/pyaf",
"id": "915af4f7874b3ad35d70131e7567abb299393f7b",
"size": "264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Anscombe/trend_Lag1Trend/cycle_5/ar_/test_artificial_128_Anscombe_Lag1Trend_5__100.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from models import *
class BlockInline(admin.TabularInline):
model = Block
class SectionAdmin(admin.ModelAdmin):
inlines = [
BlockInline
]
admin.site.register(Block)
admin.site.register(Section, SectionAdmin)
admin.site.register(Template)
| {
"content_hash": "2fb6211542569f5f83577cf1cec4f5ec",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 42,
"avg_line_length": 17.6875,
"alnum_prop": 0.7809187279151943,
"repo_name": "igudym/twango",
"id": "059c1601cfa1f16abd8b07ebe0e0ebcfe157796b",
"size": "283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twango/template/default/src/apps/grid_designer/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "201"
},
{
"name": "Python",
"bytes": "29776"
},
{
"name": "Shell",
"bytes": "101"
}
],
"symlink_target": ""
} |
import unittest
from google.appengine.ext import testbed
from application import app
class AppEngineTestCase(unittest.TestCase):
def setUp(self):
# Flask apps testing. See: http://flask.pocoo.org/docs/testing/
app.config['TESTING'] = True
app.config['CSRF_ENABLED'] = False
self.app = app.test_client()
# setup app engine test bed
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_user_stub()
def tearDown(self):
self.testbed.deactivate()
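# Illustrative subclass, given only as an assumed example of how the harness is
# meant to be used (it is not part of the original file):
#
#     class HomePageTest(AppEngineTestCase):
#         def test_home_page_loads(self):
#             self.assertEqual(self.app.get('/').status_code, 200)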
| {
"content_hash": "514879bddc158ffdf4040d73f78b7063",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 71,
"avg_line_length": 32.888888888888886,
"alnum_prop": 0.6570945945945946,
"repo_name": "rhololkeolke/apo-website",
"id": "ce03302d80c5264817f3efbc041eb4ee8d1c7108",
"size": "592",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/tests/harness.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "84190"
},
{
"name": "Python",
"bytes": "1839674"
}
],
"symlink_target": ""
} |
from random import getrandbits
from datetime import datetime, timedelta
import globus_sdk
from globus_sdk.exc import TransferAPIError
from tests.framework.constants import (GO_EP1_ID,
SDKTESTER1A_NATIVE1_TRANSFER_RT,
SDKTESTER2B_NATIVE1_TRANSFER_RT)
from tests.framework.capturedio_testcase import CapturedIOTestCase
from tests.framework.tools import get_client_data
def cleanSharing(tc):
"""
Cleans out any files in ~/.globus/sharing/ on go#ep1 older than an hour
TODO: remove this once deleting shared directories does full cleanup
"""
path = "~/.globus/sharing/"
hour_ago = datetime.utcnow() - timedelta(hours=1)
filter_string = (
"last_modified:," + hour_ago.strftime("%Y-%m-%d %H:%M:%S"))
try:
old_files = tc.operation_ls(
GO_EP1_ID, path=path, filter=filter_string)
except TransferAPIError: # no .globus dir exists
return
ddata = globus_sdk.DeleteData(tc, GO_EP1_ID, notify_on_fail=False,
notify_on_succeeded=False)
for item in old_files:
ddata.add_item(path + item["name"])
if len(ddata["DATA"]):
tc.submit_delete(ddata)
class TransferClientTestCase(CapturedIOTestCase):
"""
Class that has general setUp and tearDown methods for all classes
that tests the transfer client.
"""
__test__ = False # prevents base class from trying to run tests
@classmethod
def setUpClass(self):
"""
Does an auth flow to create an authorized client for
sdktester1a and sdktester2b
Cleans out any old sharing files before running tests
"""
ac = globus_sdk.NativeAppAuthClient(
client_id=get_client_data()["native_app_client1"]["id"])
authorizer1 = globus_sdk.RefreshTokenAuthorizer(
SDKTESTER1A_NATIVE1_TRANSFER_RT, ac)
self.tc = globus_sdk.TransferClient(authorizer=authorizer1)
authorizer2 = globus_sdk.RefreshTokenAuthorizer(
SDKTESTER2B_NATIVE1_TRANSFER_RT, ac)
self.tc2 = globus_sdk.TransferClient(authorizer=authorizer2)
cleanSharing(self.tc)
def setUp(self):
"""
Creates a list for tracking cleanup of assets created during testing
Sets up a test endpoint
"""
super(TransferClientTestCase, self).setUp()
# list of dicts, each containing a function and a list of args
# to pass to that function s.t. calling f(*args) cleans an asset
self.asset_cleanup = []
# test endpoint, uses 128 bits of randomness to prevent collision
data = {"display_name": "SDK Test Endpoint-" + str(getrandbits(128)),
"description": "Endpoint for testing the SDK"}
r = self.tc.create_endpoint(data)
self.test_ep_id = r["id"]
self.asset_cleanup.append({"function": self.tc.delete_endpoint,
"args": [r["id"]],
"name": "test_ep"}) # for ease of removal
def tearDown(self):
"""
        Walks asset_cleanup to destroy all assets created during testing
"""
super(TransferClientTestCase, self).tearDown()
# call the cleanup functions with the arguments they were given
for cleanup in self.asset_cleanup:
cleanup["function"](*cleanup["args"])
def deleteHelper(self, ep_id, path):
"""
        Helper function for cleanup. Deletes by path and endpoint.
"""
kwargs = {"notify_on_succeeded": False} # prevent email spam
ddata = globus_sdk.DeleteData(self.tc, ep_id,
label="deleteHelper",
recursive=True, **kwargs)
ddata.add_item(path)
self.tc.submit_delete(ddata)
| {
"content_hash": "aba5cde4f4351f860e050381c69b2c85",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 79,
"avg_line_length": 38.81372549019608,
"alnum_prop": 0.5991411972720384,
"repo_name": "aaschaer/globus-sdk-python",
"id": "bb57621c99b4d1dda0de098324fb58ddfd243dbd",
"size": "3959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/framework/transfer_client_testcase.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1029"
},
{
"name": "Makefile",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "450613"
}
],
"symlink_target": ""
} |
from Expressions.Expr import Expr
class Number(Expr):
def __init__(self, value):
self.value = value
def eval(self):
return self.value
def type(self):
return Number
def copy(self):
return Number(self.eval())
def to_cli(self):
return str(self.eval()) + " (Number)"
def min(self):
return Number(self.eval())
@staticmethod
def data():
return "Number"
class NaN(Expr):
def __init__(self):
return
def eval(self):
raise Exception("Forced calculation with NaN.")
def to_cli(self):
return "NaN"
@staticmethod
def data():
return "NaN" | {
"content_hash": "e6b54debcedabd3ec944765198561e52",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 55,
"avg_line_length": 16.536585365853657,
"alnum_prop": 0.556047197640118,
"repo_name": "mathiasquintero/LlamaLang",
"id": "db91758f723b9e92698719987e867dc150ad03f8",
"size": "678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Expressions/Number.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41836"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
import uuid
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for xform in orm.XForm.objects.all():
xform.uuid = uuid.uuid4().hex
xform.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.attachment': {
'Meta': {'object_name': 'Attachment'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['odk_logger.Instance']"}),
'media_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'odk_logger.instance': {
'Meta': {'object_name': 'Instance'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['odk_logger.SurveyType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['auth.User']"}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['odk_logger.XForm']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.surveytype': {
'Meta': {'object_name': 'SurveyType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.xform': {
'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'),)", 'object_name': 'XForm'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': "orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['odk_logger']
| {
"content_hash": "7e10015289a963d042c4b868d3d55087",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 182,
"avg_line_length": 70.38613861386139,
"alnum_prop": 0.5469123646082431,
"repo_name": "ehealthafrica-ci/formhub",
"id": "194bcd1a962cce9773503815bb9a6f5fb81aed86",
"size": "7127",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "odk_logger/migrations/0011_add_uuid.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "60276"
},
{
"name": "HTML",
"bytes": "251331"
},
{
"name": "JavaScript",
"bytes": "722151"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Nginx",
"bytes": "793"
},
{
"name": "Python",
"bytes": "1650038"
},
{
"name": "Shell",
"bytes": "11919"
}
],
"symlink_target": ""
} |
"""Contains the logic for `aq del city`."""
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.processes import DSDBRunner
from aquilon.worker.dbwrappers.location import get_location
from aquilon.worker.templates.city import PlenaryCity
from aquilon.worker.locks import lock_queue
from aquilon.worker.commands.del_location import CommandDelLocation
class CommandDelCity(CommandDelLocation):
required_parameters = ["city"]
def render(self, session, logger, city, **arguments):
dbcity = get_location(session, city=city)
name = dbcity.name
country = dbcity.country.name
fullname = dbcity.fullname
plenary = PlenaryCity(dbcity, logger=logger)
CommandDelLocation.render(self, session=session, name=city,
type='city', **arguments)
session.flush()
key = plenary.get_remove_key()
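        # Hold the plenary's removal lock while deleting the template and updating
        # DSDB; on any failure the stashed plenary is restored before re-raising.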
try:
lock_queue.acquire(key)
plenary.remove(locked=True)
dsdb_runner = DSDBRunner(logger=logger)
dsdb_runner.del_city(name, country, fullname)
dsdb_runner.commit_or_rollback()
except:
plenary.restore_stash()
raise
finally:
lock_queue.release(key)
return
| {
"content_hash": "cf78d82b39265e5dcecfe8a1497e0e16",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 72,
"avg_line_length": 32,
"alnum_prop": 0.6509146341463414,
"repo_name": "stdweird/aquilon",
"id": "600735666b83ff19889f4cfaf3742772a03e4f7b",
"size": "2020",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python2.6/aquilon/worker/commands/del_city.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "3791"
},
{
"name": "Makefile",
"bytes": "5024"
},
{
"name": "Mako",
"bytes": "3996"
},
{
"name": "PLSQL",
"bytes": "69088"
},
{
"name": "Perl",
"bytes": "5030"
},
{
"name": "Python",
"bytes": "4257490"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "22083"
}
],
"symlink_target": ""
} |
import urllib
import urllib2
from lxml import html
from tempest import config
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
class TestDashboardBasicOps(manager.ScenarioTest):
"""
This is a basic scenario test:
* checks that the login page is available
* logs in as a regular user
* checks that the user home page loads without error
"""
@classmethod
def resource_setup(cls):
cls.set_network_resources()
super(TestDashboardBasicOps, cls).resource_setup()
if not CONF.service_available.horizon:
raise cls.skipException("Horizon support is required")
def check_login_page(self):
response = urllib2.urlopen(CONF.dashboard.dashboard_url)
self.assertIn("<h3>Log In</h3>", response.read())
def user_login(self):
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
response = self.opener.open(CONF.dashboard.dashboard_url).read()
# Grab the CSRF token and default region
csrf_token = html.fromstring(response).xpath(
'//input[@name="csrfmiddlewaretoken"]/@value')[0]
region = html.fromstring(response).xpath(
'//input[@name="region"]/@value')[0]
# Prepare login form request
req = urllib2.Request(CONF.dashboard.login_url)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
req.add_header('Referer', CONF.dashboard.dashboard_url)
params = {'username': CONF.identity.username,
'password': CONF.identity.password,
'region': region,
'csrfmiddlewaretoken': csrf_token}
self.opener.open(req, urllib.urlencode(params))
def check_home_page(self):
response = self.opener.open(CONF.dashboard.dashboard_url)
self.assertIn('Overview', response.read())
@test.services('dashboard')
def test_basic_scenario(self):
self.check_login_page()
self.user_login()
self.check_home_page()
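# A minimal standalone sketch of the token-scraping step performed in user_login()
# above: parse the login form with lxml and pull out the hidden csrfmiddlewaretoken
# and region inputs before posting credentials. The HTML below is a made-up sample
# rather than a real Horizon page; the only assumption is that lxml is installed.
from lxml import html as lxml_html
SAMPLE_LOGIN_FORM = (
    '<form method="post">'
    '<input type="hidden" name="csrfmiddlewaretoken" value="abc123"/>'
    '<input type="hidden" name="region" value="http://example.test:5000/v2.0"/>'
    '</form>'
)
def extract_login_fields(page_source):
    tree = lxml_html.fromstring(page_source)
    token = tree.xpath('//input[@name="csrfmiddlewaretoken"]/@value')[0]
    region = tree.xpath('//input[@name="region"]/@value')[0]
    return token, region
# extract_login_fields(SAMPLE_LOGIN_FORM) -> ('abc123', 'http://example.test:5000/v2.0')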
| {
"content_hash": "92c8e5142eb289b3ef0203f339389bfa",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 75,
"avg_line_length": 33.016129032258064,
"alnum_prop": 0.6521739130434783,
"repo_name": "nikolay-fedotov/tempest",
"id": "f218fb23469976b01416cda21c404740177fc4a6",
"size": "2645",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/scenario/test_dashboard_basic_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import posixpath
from recipe_engine import recipe_api
MOUNT_SRC = '/SRC'
MOUNT_OUT = '/OUT'
class DockerApi(recipe_api.RecipeApi):
def _chmod(self, filepath, mode, recursive=False):
cmd = ['chmod']
if recursive:
cmd.append('-R')
cmd.extend([mode, filepath])
name = ' '.join([str(elem) for elem in cmd])
self.m.step(name, cmd=cmd, infra_step=True)
def mount_src(self):
return MOUNT_SRC
def mount_out(self):
return MOUNT_OUT
def run(self, name, docker_image, src_dir, out_dir, script, args=None, docker_args=None, copies=None, recursive_read=None, attempts=1):
# Setup. Docker runs as a different user, so we need to give it access to
# read, write, and execute certain files.
with self.m.step.nest('Docker setup'):
# Make sure out_dir exists, otherwise mounting will fail.
# (Note that the docker --mount option, unlike the --volume option, does
# not create this dir as root if it doesn't exist.)
self.m.file.ensure_directory('mkdirs out_dir', out_dir, mode=0777)
# ensure_directory won't change the permissions if the dir already exists,
# so we need to do that explicitly.
self._chmod(out_dir, '777')
# chmod the src_dir, but not recursively; Swarming writes some files which
# we can't access, so "chmod -R" will fail if this is the root workdir.
self._chmod(src_dir, '755')
# Need to make the script executable, or Docker can't run it.
self._chmod(script, '0755')
# Copy any requested files.
if copies:
for src, dest in copies.iteritems():
dirname = self.m.path.dirname(dest)
self.m.file.ensure_directory(
'mkdirs %s' % dirname, dirname, mode=0777)
self.m.file.copy('cp %s %s' % (src, dest), src, dest)
self._chmod(dest, '644')
# Recursive chmod any requested directories.
if recursive_read:
for elem in recursive_read:
self._chmod(elem, 'a+r', recursive=True)
# Run.
cmd = [
'docker', 'run', '--shm-size=2gb', '--rm',
'--mount', 'type=bind,source=%s,target=%s' % (src_dir, MOUNT_SRC),
'--mount', 'type=bind,source=%s,target=%s' % (out_dir, MOUNT_OUT),
]
if docker_args:
cmd.extend(docker_args)
script_rel = posixpath.relpath(str(script), str(self.m.path['start_dir']))
cmd.extend([docker_image, MOUNT_SRC + '/' + script_rel])
if args:
cmd.extend(args)
env = {'DOCKER_CONFIG': '/home/chrome-bot/.docker'}
with self.m.env(env):
self.m.run.with_retry(self.m.step, name, attempts, cmd=cmd)
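# A minimal standalone sketch (plain Python, no recipe_engine) of the command-line
# assembly above: the checkout and output directories are bind-mounted at fixed
# container paths so the script always sees them as /SRC and /OUT, no matter where
# the bot placed them on the host. Paths and image name below are made-up examples.
def build_docker_cmd(src_dir, out_dir, image, script_rel, script_args=None):
    cmd = [
        'docker', 'run', '--shm-size=2gb', '--rm',
        '--mount', 'type=bind,source=%s,target=/SRC' % src_dir,
        '--mount', 'type=bind,source=%s,target=/OUT' % out_dir,
        image,
        '/SRC/' + script_rel,
    ]
    if script_args:
        cmd.extend(script_args)
    return cmd
# build_docker_cmd('/b/w/skia', '/b/w/out', 'gcr.io/example/img', 'infra/bots/run.sh')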
| {
"content_hash": "02e748eda2ee503e1ab8985f5c834b22",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 137,
"avg_line_length": 36.19444444444444,
"alnum_prop": 0.6250959324635457,
"repo_name": "HalCanary/skia-hc",
"id": "0170d01de85e7bad8a37baf02f04813fef7ffbd9",
"size": "2740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infra/bots/recipe_modules/docker/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1277297"
},
{
"name": "Batchfile",
"bytes": "865"
},
{
"name": "C",
"bytes": "505166"
},
{
"name": "C#",
"bytes": "4683"
},
{
"name": "C++",
"bytes": "32234337"
},
{
"name": "CMake",
"bytes": "2850"
},
{
"name": "CSS",
"bytes": "3078"
},
{
"name": "Dockerfile",
"bytes": "14764"
},
{
"name": "GLSL",
"bytes": "109164"
},
{
"name": "Go",
"bytes": "135327"
},
{
"name": "HTML",
"bytes": "1321397"
},
{
"name": "Java",
"bytes": "167849"
},
{
"name": "JavaScript",
"bytes": "463920"
},
{
"name": "Lex",
"bytes": "2521"
},
{
"name": "Lua",
"bytes": "70982"
},
{
"name": "Makefile",
"bytes": "13502"
},
{
"name": "Objective-C",
"bytes": "83351"
},
{
"name": "Objective-C++",
"bytes": "366996"
},
{
"name": "PHP",
"bytes": "139510"
},
{
"name": "PowerShell",
"bytes": "1432"
},
{
"name": "Python",
"bytes": "1055437"
},
{
"name": "Shell",
"bytes": "95010"
}
],
"symlink_target": ""
} |
import binwalk.core.plugin
class Unjffs2DepreciatedPlugin(binwalk.core.plugin.Plugin):
pass
| {
"content_hash": "719b47a13181173a19dc2e8e3967ea43",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 59,
"avg_line_length": 19.6,
"alnum_prop": 0.8061224489795918,
"repo_name": "devttys0/binwalk",
"id": "1a2604f294cdaba264a996db828c09e10f891c1f",
"size": "268",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/binwalk/plugins/unjffs2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "279881"
},
{
"name": "Shell",
"bytes": "6465"
}
],
"symlink_target": ""
} |
def extraNumber(a, b, c):
return c if a == b else a if a != c else b
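# A few usage checks for the one-liner above: exactly two of the three inputs are
# equal, and the function returns the one that differs.
assert extraNumber(2, 2, 1) == 1   # a == b, so c is the extra number
assert extraNumber(2, 7, 2) == 7   # a == c, so b is the extra number
assert extraNumber(4, 3, 3) == 4   # b == c, so a is the extra number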
| {
"content_hash": "cc9c9879a59957ef7bf73acf604a8b2e",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 46,
"avg_line_length": 36.5,
"alnum_prop": 0.5753424657534246,
"repo_name": "RevansChen/online-judge",
"id": "caa426f601621620caf8a388e6ae7b1cc2725f33",
"size": "84",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Codefights/arcade/code-arcade/level-2/11.Extra-Number/Python/solution1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Brainfuck",
"bytes": "102"
},
{
"name": "C",
"bytes": "6829"
},
{
"name": "C#",
"bytes": "19758"
},
{
"name": "C++",
"bytes": "9439"
},
{
"name": "Clojure",
"bytes": "75"
},
{
"name": "CoffeeScript",
"bytes": "903"
},
{
"name": "Crystal",
"bytes": "52"
},
{
"name": "Dart",
"bytes": "182"
},
{
"name": "Elixir",
"bytes": "1027"
},
{
"name": "Erlang",
"bytes": "132"
},
{
"name": "F#",
"bytes": "40"
},
{
"name": "Go",
"bytes": "83"
},
{
"name": "Haskell",
"bytes": "102"
},
{
"name": "Java",
"bytes": "11057"
},
{
"name": "JavaScript",
"bytes": "44773"
},
{
"name": "Kotlin",
"bytes": "82"
},
{
"name": "Lua",
"bytes": "93"
},
{
"name": "PHP",
"bytes": "2875"
},
{
"name": "Python",
"bytes": "563400"
},
{
"name": "R",
"bytes": "265"
},
{
"name": "Ruby",
"bytes": "7171"
},
{
"name": "Rust",
"bytes": "74"
},
{
"name": "Scala",
"bytes": "84"
},
{
"name": "Shell",
"bytes": "438"
},
{
"name": "Swift",
"bytes": "6597"
},
{
"name": "TSQL",
"bytes": "3531"
},
{
"name": "TypeScript",
"bytes": "5744"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('ploghubapp', '0005_auto_20170804_0901'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment_text', models.TextField()),
('comment_text_html', models.TextField()),
('deleted', models.BooleanField(default=False)),
('upvotes', models.IntegerField(default=0)),
('downvotes', models.IntegerField(default=0)),
('net_votes', models.IntegerField(default=1)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='ploghubapp.Comment')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ploghubapp.Post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='HistoricalComment',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('comment_text', models.TextField()),
('comment_text_html', models.TextField()),
('deleted', models.BooleanField(default=False)),
('upvotes', models.IntegerField(default=0)),
('downvotes', models.IntegerField(default=0)),
('net_votes', models.IntegerField(default=1)),
('created', models.DateTimeField(blank=True, editable=False)),
('updated', models.DateTimeField(blank=True, editable=False)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('parent', mptt.fields.TreeForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='ploghubapp.HistoricalComment')),
('post', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='ploghubapp.Post')),
('user', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical comment',
'get_latest_by': 'history_date',
'ordering': ('-history_date', '-history_id'),
},
),
]
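# The lft/rght/tree_id/level columns created above are django-mptt's nested-set
# bookkeeping for threaded comments. A minimal pure-Python sketch (no Django or mptt
# needed) of how those numbers answer "is comment X nested somewhere under comment Y?":
def is_descendant(x, y):
    """x and y are dicts carrying the 'tree_id', 'lft' and 'rght' values mptt stores."""
    return (
        x['tree_id'] == y['tree_id']
        and y['lft'] < x['lft']
        and x['rght'] < y['rght']
    )
# Example tree: the root spans (1, 6); its two replies span (2, 3) and (4, 5).
root = {'tree_id': 1, 'lft': 1, 'rght': 6}
reply = {'tree_id': 1, 'lft': 2, 'rght': 3}
assert is_descendant(reply, root)
assert not is_descendant(root, reply)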
| {
"content_hash": "c9062085abf91da5508fa5aca614075a",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 200,
"avg_line_length": 58.220588235294116,
"alnum_prop": 0.5950997726698661,
"repo_name": "ploggingdev/ploghub",
"id": "99fe6dfa1b4178a168b9a2d50dfb7c69c7ee98ad",
"size": "4032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ploghubapp/migrations/0006_comment_historicalcomment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "38789"
},
{
"name": "JavaScript",
"bytes": "8540"
},
{
"name": "Python",
"bytes": "71833"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._sql_resources_operations import (
build_create_update_client_encryption_key_request,
build_create_update_sql_container_request,
build_create_update_sql_database_request,
build_create_update_sql_role_assignment_request,
build_create_update_sql_role_definition_request,
build_create_update_sql_stored_procedure_request,
build_create_update_sql_trigger_request,
build_create_update_sql_user_defined_function_request,
build_delete_sql_container_request,
build_delete_sql_database_request,
build_delete_sql_role_assignment_request,
build_delete_sql_role_definition_request,
build_delete_sql_stored_procedure_request,
build_delete_sql_trigger_request,
build_delete_sql_user_defined_function_request,
build_get_client_encryption_key_request,
build_get_sql_container_request,
build_get_sql_container_throughput_request,
build_get_sql_database_request,
build_get_sql_database_throughput_request,
build_get_sql_role_assignment_request,
build_get_sql_role_definition_request,
build_get_sql_stored_procedure_request,
build_get_sql_trigger_request,
build_get_sql_user_defined_function_request,
build_list_client_encryption_keys_request,
build_list_sql_container_partition_merge_request,
build_list_sql_containers_request,
build_list_sql_databases_request,
build_list_sql_role_assignments_request,
build_list_sql_role_definitions_request,
build_list_sql_stored_procedures_request,
build_list_sql_triggers_request,
build_list_sql_user_defined_functions_request,
build_migrate_sql_container_to_autoscale_request,
build_migrate_sql_container_to_manual_throughput_request,
build_migrate_sql_database_to_autoscale_request,
build_migrate_sql_database_to_manual_throughput_request,
build_retrieve_continuous_backup_information_request,
build_sql_container_redistribute_throughput_request,
build_sql_container_retrieve_throughput_distribution_request,
build_sql_database_redistribute_throughput_request,
build_sql_database_retrieve_throughput_distribution_request,
build_update_sql_container_throughput_request,
build_update_sql_database_throughput_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SqlResourcesOperations: # pylint: disable=too-many-public-methods
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.cosmosdb.aio.CosmosDBManagementClient`'s
:attr:`sql_resources` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_sql_databases(
self, resource_group_name: str, account_name: str, **kwargs: Any
) -> AsyncIterable["_models.SqlDatabaseGetResults"]:
"""Lists the SQL databases under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SqlDatabaseGetResults or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.SqlDatabaseGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlDatabaseListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_sql_databases_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_sql_databases.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SqlDatabaseListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_sql_databases.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases"} # type: ignore
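    # A hedged usage sketch (kept as comments so the generated class is left intact):
    # the pager returned by list_sql_databases is consumed with "async for". Resource
    # group, account and subscription values are placeholders, and the credential
    # import assumes azure-identity is available.
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.cosmosdb.aio import CosmosDBManagementClient
    #
    #   async with CosmosDBManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
    #       async for db in client.sql_resources.list_sql_databases("my-rg", "my-account"):
    #           print(db.name)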
@distributed_trace_async
async def get_sql_database(
self, resource_group_name: str, account_name: str, database_name: str, **kwargs: Any
) -> _models.SqlDatabaseGetResults:
"""Gets the SQL database under an existing Azure Cosmos DB database account with the provided
name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SqlDatabaseGetResults or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.SqlDatabaseGetResults
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlDatabaseGetResults]
request = build_get_sql_database_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_sql_database.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("SqlDatabaseGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_database.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}"} # type: ignore
async def _create_update_sql_database_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
create_update_sql_database_parameters: Union[_models.SqlDatabaseCreateUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.SqlDatabaseGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.SqlDatabaseGetResults]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(create_update_sql_database_parameters, (IO, bytes)):
_content = create_update_sql_database_parameters
else:
_json = self._serialize.body(create_update_sql_database_parameters, "SqlDatabaseCreateUpdateParameters")
request = build_create_update_sql_database_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_update_sql_database_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("SqlDatabaseGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_sql_database_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}"} # type: ignore
@overload
async def begin_create_update_sql_database(
self,
resource_group_name: str,
account_name: str,
database_name: str,
create_update_sql_database_parameters: _models.SqlDatabaseCreateUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.SqlDatabaseGetResults]:
"""Create or update an Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param create_update_sql_database_parameters: The parameters to provide for the current SQL
database. Required.
:type create_update_sql_database_parameters:
~azure.mgmt.cosmosdb.models.SqlDatabaseCreateUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlDatabaseGetResults or the result
of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlDatabaseGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_update_sql_database(
self,
resource_group_name: str,
account_name: str,
database_name: str,
create_update_sql_database_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.SqlDatabaseGetResults]:
"""Create or update an Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param create_update_sql_database_parameters: The parameters to provide for the current SQL
database. Required.
:type create_update_sql_database_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlDatabaseGetResults or the result
of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlDatabaseGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_update_sql_database(
self,
resource_group_name: str,
account_name: str,
database_name: str,
create_update_sql_database_parameters: Union[_models.SqlDatabaseCreateUpdateParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.SqlDatabaseGetResults]:
"""Create or update an Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param create_update_sql_database_parameters: The parameters to provide for the current SQL
        database. Is either a model type or an IO type. Required.
:type create_update_sql_database_parameters:
~azure.mgmt.cosmosdb.models.SqlDatabaseCreateUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlDatabaseGetResults or the result
of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlDatabaseGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlDatabaseGetResults]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_update_sql_database_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
create_update_sql_database_parameters=create_update_sql_database_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("SqlDatabaseGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_sql_database.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}"} # type: ignore
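    # A hedged usage sketch (comments only) for the long-running create/update call
    # above; the names are placeholders and the model construction follows the shapes
    # documented in azure.mgmt.cosmosdb.models:
    #
    #   from azure.mgmt.cosmosdb import models
    #
    #   params = models.SqlDatabaseCreateUpdateParameters(
    #       resource=models.SqlDatabaseResource(id="my-database"),
    #       options=models.CreateUpdateOptions(throughput=400),
    #   )
    #   poller = await client.sql_resources.begin_create_update_sql_database(
    #       "my-rg", "my-account", "my-database", params
    #   )
    #   database = await poller.result()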
async def _delete_sql_database_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, account_name: str, database_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_sql_database_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_sql_database_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_sql_database_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}"} # type: ignore
@distributed_trace_async
async def begin_delete_sql_database(
self, resource_group_name: str, account_name: str, database_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an existing Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_sql_database_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_sql_database.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}"} # type: ignore
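    # A hedged usage sketch (comments only): deletion is likewise a poller, awaited to
    # completion, and returns no body on success.
    #
    #   delete_poller = await client.sql_resources.begin_delete_sql_database(
    #       "my-rg", "my-account", "my-database"
    #   )
    #   await delete_poller.result()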
@distributed_trace_async
async def get_sql_database_throughput(
self, resource_group_name: str, account_name: str, database_name: str, **kwargs: Any
) -> _models.ThroughputSettingsGetResults:
"""Gets the RUs per second of the SQL database under an existing Azure Cosmos DB database account
with the provided name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ThroughputSettingsGetResults]
request = build_get_sql_database_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_sql_database_throughput.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_database_throughput.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default"} # type: ignore
async def _update_sql_database_throughput_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
update_throughput_parameters: Union[_models.ThroughputSettingsUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.ThroughputSettingsGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.ThroughputSettingsGetResults]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(update_throughput_parameters, (IO, bytes)):
_content = update_throughput_parameters
else:
_json = self._serialize.body(update_throughput_parameters, "ThroughputSettingsUpdateParameters")
request = build_update_sql_database_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_sql_database_throughput_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_sql_database_throughput_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default"} # type: ignore
@overload
async def begin_update_sql_database_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
update_throughput_parameters: _models.ThroughputSettingsUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.ThroughputSettingsGetResults]:
"""Update RUs per second of an Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current SQL database. Required.
:type update_throughput_parameters:
~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_update_sql_database_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
update_throughput_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.ThroughputSettingsGetResults]:
"""Update RUs per second of an Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current SQL database. Required.
:type update_throughput_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_update_sql_database_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
update_throughput_parameters: Union[_models.ThroughputSettingsUpdateParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.ThroughputSettingsGetResults]:
"""Update RUs per second of an Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
        current SQL database. Is either a model type or an IO type. Required.
:type update_throughput_parameters:
~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ThroughputSettingsGetResults]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_sql_database_throughput_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
update_throughput_parameters=update_throughput_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_sql_database_throughput.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default"} # type: ignore
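    # A hedged usage sketch (comments only) for updating database-level RU/s; the
    # model names follow the docstrings above and the throughput value is a placeholder:
    #
    #   throughput_params = models.ThroughputSettingsUpdateParameters(
    #       resource=models.ThroughputSettingsResource(throughput=800)
    #   )
    #   poller = await client.sql_resources.begin_update_sql_database_throughput(
    #       "my-rg", "my-account", "my-database", throughput_params
    #   )
    #   settings = await poller.result()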
async def _migrate_sql_database_to_autoscale_initial(
self, resource_group_name: str, account_name: str, database_name: str, **kwargs: Any
) -> Optional[_models.ThroughputSettingsGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.ThroughputSettingsGetResults]]
request = build_migrate_sql_database_to_autoscale_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._migrate_sql_database_to_autoscale_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_sql_database_to_autoscale_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default/migrateToAutoscale"} # type: ignore
@distributed_trace_async
async def begin_migrate_sql_database_to_autoscale(
self, resource_group_name: str, account_name: str, database_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.ThroughputSettingsGetResults]:
"""Migrate an Azure Cosmos DB SQL database from manual throughput to autoscale.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ThroughputSettingsGetResults]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._migrate_sql_database_to_autoscale_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_sql_database_to_autoscale.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default/migrateToAutoscale"} # type: ignore
async def _migrate_sql_database_to_manual_throughput_initial(
self, resource_group_name: str, account_name: str, database_name: str, **kwargs: Any
) -> Optional[_models.ThroughputSettingsGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.ThroughputSettingsGetResults]]
request = build_migrate_sql_database_to_manual_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._migrate_sql_database_to_manual_throughput_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_sql_database_to_manual_throughput_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default/migrateToManualThroughput"} # type: ignore
@distributed_trace_async
async def begin_migrate_sql_database_to_manual_throughput(
self, resource_group_name: str, account_name: str, database_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.ThroughputSettingsGetResults]:
"""Migrate an Azure Cosmos DB SQL database from autoscale to manual throughput.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ThroughputSettingsGetResults]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._migrate_sql_database_to_manual_throughput_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_sql_database_to_manual_throughput.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default/migrateToManualThroughput"} # type: ignore
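    # Illustrative sketch of resuming this LRO from a saved state via the
    # `continuation_token` keyword documented above (client/resource names are
    # assumptions):
    #
    #     poller = await client.sql_resources.begin_migrate_sql_database_to_manual_throughput(
    #         resource_group_name="my-rg", account_name="my-account", database_name="my-db"
    #     )
    #     token = poller.continuation_token()  # persist somewhere durable
    #     resumed = await client.sql_resources.begin_migrate_sql_database_to_manual_throughput(
    #         resource_group_name="my-rg", account_name="my-account", database_name="my-db",
    #         continuation_token=token,
    #     )
    #     result = await resumed.result()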
@distributed_trace
def list_client_encryption_keys(
self, resource_group_name: str, account_name: str, database_name: str, **kwargs: Any
) -> AsyncIterable["_models.ClientEncryptionKeyGetResults"]:
"""Lists the ClientEncryptionKeys under an existing Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either ClientEncryptionKeyGetResults or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.ClientEncryptionKeyGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ClientEncryptionKeysListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_client_encryption_keys_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_client_encryption_keys.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ClientEncryptionKeysListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_client_encryption_keys.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/clientEncryptionKeys"} # type: ignore
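    # Illustrative consumption of the AsyncItemPaged returned above (the client
    # name and resource names are placeholders):
    #
    #     async for cek in client.sql_resources.list_client_encryption_keys(
    #         resource_group_name="my-rg", account_name="my-account", database_name="my-db"
    #     ):
    #         print(cek.name)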
@distributed_trace_async
async def get_client_encryption_key(
self,
resource_group_name: str,
account_name: str,
database_name: str,
client_encryption_key_name: str,
**kwargs: Any
) -> _models.ClientEncryptionKeyGetResults:
"""Gets the ClientEncryptionKey under an existing Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param client_encryption_key_name: Cosmos DB ClientEncryptionKey name. Required.
:type client_encryption_key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ClientEncryptionKeyGetResults or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.ClientEncryptionKeyGetResults
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ClientEncryptionKeyGetResults]
request = build_get_client_encryption_key_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
client_encryption_key_name=client_encryption_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_client_encryption_key.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ClientEncryptionKeyGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_client_encryption_key.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/clientEncryptionKeys/{clientEncryptionKeyName}"} # type: ignore
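    # Illustrative sketch: because 404 is mapped to ResourceNotFoundError in the
    # error map above, a missing key can be handled explicitly (names are
    # placeholders):
    #
    #     from azure.core.exceptions import ResourceNotFoundError
    #     try:
    #         cek = await client.sql_resources.get_client_encryption_key(
    #             resource_group_name="my-rg", account_name="my-account",
    #             database_name="my-db", client_encryption_key_name="cek1",
    #         )
    #     except ResourceNotFoundError:
    #         cek = None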
async def _create_update_client_encryption_key_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
client_encryption_key_name: str,
create_update_client_encryption_key_parameters: Union[_models.ClientEncryptionKeyCreateUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.ClientEncryptionKeyGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.ClientEncryptionKeyGetResults]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(create_update_client_encryption_key_parameters, (IO, bytes)):
_content = create_update_client_encryption_key_parameters
else:
_json = self._serialize.body(
create_update_client_encryption_key_parameters, "ClientEncryptionKeyCreateUpdateParameters"
)
request = build_create_update_client_encryption_key_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
client_encryption_key_name=client_encryption_key_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_update_client_encryption_key_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("ClientEncryptionKeyGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_client_encryption_key_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/clientEncryptionKeys/{clientEncryptionKeyName}"} # type: ignore
@overload
async def begin_create_update_client_encryption_key(
self,
resource_group_name: str,
account_name: str,
database_name: str,
client_encryption_key_name: str,
create_update_client_encryption_key_parameters: _models.ClientEncryptionKeyCreateUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.ClientEncryptionKeyGetResults]:
"""Create or update a ClientEncryptionKey. This API is meant to be invoked via tools such as the
Azure Powershell (instead of directly).
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param client_encryption_key_name: Cosmos DB ClientEncryptionKey name. Required.
:type client_encryption_key_name: str
:param create_update_client_encryption_key_parameters: The parameters to provide for the client
encryption key. Required.
:type create_update_client_encryption_key_parameters:
~azure.mgmt.cosmosdb.models.ClientEncryptionKeyCreateUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ClientEncryptionKeyGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ClientEncryptionKeyGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_update_client_encryption_key(
self,
resource_group_name: str,
account_name: str,
database_name: str,
client_encryption_key_name: str,
create_update_client_encryption_key_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.ClientEncryptionKeyGetResults]:
"""Create or update a ClientEncryptionKey. This API is meant to be invoked via tools such as the
Azure Powershell (instead of directly).
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param client_encryption_key_name: Cosmos DB ClientEncryptionKey name. Required.
:type client_encryption_key_name: str
:param create_update_client_encryption_key_parameters: The parameters to provide for the client
encryption key. Required.
:type create_update_client_encryption_key_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ClientEncryptionKeyGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ClientEncryptionKeyGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_update_client_encryption_key(
self,
resource_group_name: str,
account_name: str,
database_name: str,
client_encryption_key_name: str,
create_update_client_encryption_key_parameters: Union[_models.ClientEncryptionKeyCreateUpdateParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.ClientEncryptionKeyGetResults]:
"""Create or update a ClientEncryptionKey. This API is meant to be invoked via tools such as the
Azure Powershell (instead of directly).
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param client_encryption_key_name: Cosmos DB ClientEncryptionKey name. Required.
:type client_encryption_key_name: str
:param create_update_client_encryption_key_parameters: The parameters to provide for the client
         encryption key. Is either a model type or an IO type. Required.
:type create_update_client_encryption_key_parameters:
~azure.mgmt.cosmosdb.models.ClientEncryptionKeyCreateUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ClientEncryptionKeyGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ClientEncryptionKeyGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ClientEncryptionKeyGetResults]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_update_client_encryption_key_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
client_encryption_key_name=client_encryption_key_name,
create_update_client_encryption_key_parameters=create_update_client_encryption_key_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ClientEncryptionKeyGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_client_encryption_key.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/clientEncryptionKeys/{clientEncryptionKeyName}"} # type: ignore
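    # Illustrative sketch of invoking the create/update LRO. The body may be a
    # ClientEncryptionKeyCreateUpdateParameters model or a raw IO/bytes JSON
    # payload, as documented above; `cek_resource` is assumed to be a
    # ClientEncryptionKeyResource built elsewhere, not a verified schema:
    #
    #     from azure.mgmt.cosmosdb import models as cosmos_models
    #     params = cosmos_models.ClientEncryptionKeyCreateUpdateParameters(resource=cek_resource)
    #     poller = await client.sql_resources.begin_create_update_client_encryption_key(
    #         resource_group_name="my-rg", account_name="my-account", database_name="my-db",
    #         client_encryption_key_name="cek1",
    #         create_update_client_encryption_key_parameters=params,
    #     )
    #     created = await poller.result()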
@distributed_trace
def list_sql_containers(
self, resource_group_name: str, account_name: str, database_name: str, **kwargs: Any
) -> AsyncIterable["_models.SqlContainerGetResults"]:
"""Lists the SQL container under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either SqlContainerGetResults or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.SqlContainerGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlContainerListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_sql_containers_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_sql_containers.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SqlContainerListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_sql_containers.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers"} # type: ignore
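    # Illustrative sketch: the AsyncItemPaged returned above can also be walked
    # page by page (client and resource names are placeholders):
    #
    #     pager = client.sql_resources.list_sql_containers(
    #         resource_group_name="my-rg", account_name="my-account", database_name="my-db"
    #     )
    #     async for page in pager.by_page():
    #         async for container in page:
    #             print(container.name)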
@distributed_trace_async
async def get_sql_container(
self, resource_group_name: str, account_name: str, database_name: str, container_name: str, **kwargs: Any
) -> _models.SqlContainerGetResults:
"""Gets the SQL container under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SqlContainerGetResults or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.SqlContainerGetResults
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlContainerGetResults]
request = build_get_sql_container_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_sql_container.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("SqlContainerGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_container.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}"} # type: ignore
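    # Illustrative read of a single container; the attribute path on the result is
    # an assumption based on the SqlContainerGetResults model:
    #
    #     container = await client.sql_resources.get_sql_container(
    #         resource_group_name="my-rg", account_name="my-account",
    #         database_name="my-db", container_name="my-container",
    #     )
    #     paths = container.resource.partition_key.paths if container.resource else None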
async def _create_update_sql_container_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
create_update_sql_container_parameters: Union[_models.SqlContainerCreateUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.SqlContainerGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.SqlContainerGetResults]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(create_update_sql_container_parameters, (IO, bytes)):
_content = create_update_sql_container_parameters
else:
_json = self._serialize.body(create_update_sql_container_parameters, "SqlContainerCreateUpdateParameters")
request = build_create_update_sql_container_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_update_sql_container_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("SqlContainerGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_sql_container_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}"} # type: ignore
@overload
async def begin_create_update_sql_container(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
create_update_sql_container_parameters: _models.SqlContainerCreateUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.SqlContainerGetResults]:
"""Create or update an Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param create_update_sql_container_parameters: The parameters to provide for the current SQL
container. Required.
:type create_update_sql_container_parameters:
~azure.mgmt.cosmosdb.models.SqlContainerCreateUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlContainerGetResults or the result
of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlContainerGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_update_sql_container(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
create_update_sql_container_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.SqlContainerGetResults]:
"""Create or update an Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param create_update_sql_container_parameters: The parameters to provide for the current SQL
container. Required.
:type create_update_sql_container_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlContainerGetResults or the result
of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlContainerGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_update_sql_container(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
create_update_sql_container_parameters: Union[_models.SqlContainerCreateUpdateParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.SqlContainerGetResults]:
"""Create or update an Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param create_update_sql_container_parameters: The parameters to provide for the current SQL
         container. Is either a model type or an IO type. Required.
:type create_update_sql_container_parameters:
~azure.mgmt.cosmosdb.models.SqlContainerCreateUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlContainerGetResults or the result
of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlContainerGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlContainerGetResults]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_update_sql_container_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
create_update_sql_container_parameters=create_update_sql_container_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("SqlContainerGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_sql_container.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}"} # type: ignore
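    # Illustrative create/update sketch; the SqlContainerResource and
    # ContainerPartitionKey field names are assumptions taken from the models
    # module, and all resource names are placeholders:
    #
    #     from azure.mgmt.cosmosdb import models as cosmos_models
    #     params = cosmos_models.SqlContainerCreateUpdateParameters(
    #         location="West US",
    #         resource=cosmos_models.SqlContainerResource(
    #             id="my-container",
    #             partition_key=cosmos_models.ContainerPartitionKey(paths=["/pk"], kind="Hash"),
    #         ),
    #     )
    #     poller = await client.sql_resources.begin_create_update_sql_container(
    #         resource_group_name="my-rg", account_name="my-account", database_name="my-db",
    #         container_name="my-container", create_update_sql_container_parameters=params,
    #     )
    #     container = await poller.result()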
async def _delete_sql_container_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, account_name: str, database_name: str, container_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_sql_container_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_sql_container_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_sql_container_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}"} # type: ignore
@distributed_trace_async
async def begin_delete_sql_container(
self, resource_group_name: str, account_name: str, database_name: str, container_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an existing Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_sql_container_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_sql_container.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}"} # type: ignore
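    # Illustrative deletion sketch. By default the poller tracks the operation to
    # completion; awaiting wait() blocks until it finishes, and result() returns
    # None for this operation (names are placeholders):
    #
    #     poller = await client.sql_resources.begin_delete_sql_container(
    #         resource_group_name="my-rg", account_name="my-account",
    #         database_name="my-db", container_name="my-container",
    #     )
    #     await poller.wait()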
async def _list_sql_container_partition_merge_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
merge_parameters: Union[_models.MergeParameters, IO],
**kwargs: Any
) -> Optional[_models.PhysicalPartitionStorageInfoCollection]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.PhysicalPartitionStorageInfoCollection]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(merge_parameters, (IO, bytes)):
_content = merge_parameters
else:
_json = self._serialize.body(merge_parameters, "MergeParameters")
request = build_list_sql_container_partition_merge_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._list_sql_container_partition_merge_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("PhysicalPartitionStorageInfoCollection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_sql_container_partition_merge_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/partitionMerge"} # type: ignore
@overload
async def begin_list_sql_container_partition_merge(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
merge_parameters: _models.MergeParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PhysicalPartitionStorageInfoCollection]:
"""Merges the partitions of a SQL Container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param merge_parameters: The parameters for the merge operation. Required.
:type merge_parameters: ~azure.mgmt.cosmosdb.models.MergeParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionStorageInfoCollection or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionStorageInfoCollection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_list_sql_container_partition_merge(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
merge_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PhysicalPartitionStorageInfoCollection]:
"""Merges the partitions of a SQL Container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param merge_parameters: The parameters for the merge operation. Required.
:type merge_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionStorageInfoCollection or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionStorageInfoCollection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_list_sql_container_partition_merge(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
merge_parameters: Union[_models.MergeParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.PhysicalPartitionStorageInfoCollection]:
"""Merges the partitions of a SQL Container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
        :param merge_parameters: The parameters for the merge operation. Is either a model type or an IO
type. Required.
:type merge_parameters: ~azure.mgmt.cosmosdb.models.MergeParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionStorageInfoCollection or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionStorageInfoCollection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.PhysicalPartitionStorageInfoCollection]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_sql_container_partition_merge_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
merge_parameters=merge_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("PhysicalPartitionStorageInfoCollection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_sql_container_partition_merge.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/partitionMerge"} # type: ignore
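    # Illustrative sketch; note the poller above is built with
    # final-state-via=location, so the merge result is fetched from the Location
    # URL. The MergeParameters field name is an assumption from the models module:
    #
    #     from azure.mgmt.cosmosdb import models as cosmos_models
    #     poller = await client.sql_resources.begin_list_sql_container_partition_merge(
    #         resource_group_name="my-rg", account_name="my-account", database_name="my-db",
    #         container_name="my-container",
    #         merge_parameters=cosmos_models.MergeParameters(is_dry_run=True),
    #     )
    #     partitions = await poller.result()  # PhysicalPartitionStorageInfoCollection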
@distributed_trace_async
async def get_sql_container_throughput(
self, resource_group_name: str, account_name: str, database_name: str, container_name: str, **kwargs: Any
) -> _models.ThroughputSettingsGetResults:
"""Gets the RUs per second of the SQL container under an existing Azure Cosmos DB database
account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ThroughputSettingsGetResults or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ThroughputSettingsGetResults]
request = build_get_sql_container_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_sql_container_throughput.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_container_throughput.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default"} # type: ignore
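    # Illustrative read of container throughput; whether `throughput` or
    # `autoscale_settings` is populated depends on the container's provisioning
    # mode, and the attribute names are assumptions from ThroughputSettingsGetResults:
    #
    #     settings = await client.sql_resources.get_sql_container_throughput(
    #         resource_group_name="my-rg", account_name="my-account",
    #         database_name="my-db", container_name="my-container",
    #     )
    #     manual_rus = settings.resource.throughput
    #     autoscale = settings.resource.autoscale_settings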
async def _update_sql_container_throughput_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
update_throughput_parameters: Union[_models.ThroughputSettingsUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.ThroughputSettingsGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.ThroughputSettingsGetResults]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(update_throughput_parameters, (IO, bytes)):
_content = update_throughput_parameters
else:
_json = self._serialize.body(update_throughput_parameters, "ThroughputSettingsUpdateParameters")
request = build_update_sql_container_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_sql_container_throughput_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_sql_container_throughput_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default"} # type: ignore
@overload
async def begin_update_sql_container_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
update_throughput_parameters: _models.ThroughputSettingsUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.ThroughputSettingsGetResults]:
"""Update RUs per second of an Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current SQL container. Required.
:type update_throughput_parameters:
~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_update_sql_container_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
update_throughput_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.ThroughputSettingsGetResults]:
"""Update RUs per second of an Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current SQL container. Required.
:type update_throughput_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_update_sql_container_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
update_throughput_parameters: Union[_models.ThroughputSettingsUpdateParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.ThroughputSettingsGetResults]:
"""Update RUs per second of an Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
        current SQL container. Is either a model type or an IO type. Required.
:type update_throughput_parameters:
~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
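
        Example (an illustrative sketch, not a recorded sample; the client construction mirrors
        the ``get_sql_container_throughput`` example, the operations group is assumed to be
        ``client.sql_resources``, and ``ThroughputSettingsResource`` is assumed to be the nested
        model that carries the new RU/s value):

        .. code-block:: python

            import asyncio

            from azure.identity.aio import DefaultAzureCredential
            from azure.mgmt.cosmosdb import models
            from azure.mgmt.cosmosdb.aio import CosmosDBManagementClient

            async def main():
                async with DefaultAzureCredential() as credential:
                    async with CosmosDBManagementClient(credential, "<subscription-id>") as client:
                        poller = await client.sql_resources.begin_update_sql_container_throughput(
                            resource_group_name="<resource-group>",
                            account_name="<account>",
                            database_name="<database>",
                            container_name="<container>",
                            update_throughput_parameters=models.ThroughputSettingsUpdateParameters(
                                resource=models.ThroughputSettingsResource(throughput=400)
                            ),
                        )
                        # The long-running operation resolves to ThroughputSettingsGetResults.
                        result = await poller.result()
                        print(result.resource)

            asyncio.run(main())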
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ThroughputSettingsGetResults]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_sql_container_throughput_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
update_throughput_parameters=update_throughput_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_sql_container_throughput.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default"} # type: ignore
async def _migrate_sql_container_to_autoscale_initial(
self, resource_group_name: str, account_name: str, database_name: str, container_name: str, **kwargs: Any
) -> Optional[_models.ThroughputSettingsGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.ThroughputSettingsGetResults]]
request = build_migrate_sql_container_to_autoscale_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._migrate_sql_container_to_autoscale_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_sql_container_to_autoscale_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default/migrateToAutoscale"} # type: ignore
@distributed_trace_async
async def begin_migrate_sql_container_to_autoscale(
self, resource_group_name: str, account_name: str, database_name: str, container_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.ThroughputSettingsGetResults]:
"""Migrate an Azure Cosmos DB SQL container from manual throughput to autoscale.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
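
        Example (an illustrative sketch, not a recorded sample; assumes the operations group is
        exposed as ``client.sql_resources`` and uses placeholder resource names):

        .. code-block:: python

            import asyncio

            from azure.identity.aio import DefaultAzureCredential
            from azure.mgmt.cosmosdb.aio import CosmosDBManagementClient

            async def main():
                async with DefaultAzureCredential() as credential:
                    async with CosmosDBManagementClient(credential, "<subscription-id>") as client:
                        poller = await client.sql_resources.begin_migrate_sql_container_to_autoscale(
                            resource_group_name="<resource-group>",
                            account_name="<account>",
                            database_name="<database>",
                            container_name="<container>",
                        )
                        settings = await poller.result()
                        # After migration the settings should report an autoscale configuration.
                        print(settings.resource)

            asyncio.run(main())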
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ThroughputSettingsGetResults]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._migrate_sql_container_to_autoscale_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_sql_container_to_autoscale.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default/migrateToAutoscale"} # type: ignore
async def _migrate_sql_container_to_manual_throughput_initial(
self, resource_group_name: str, account_name: str, database_name: str, container_name: str, **kwargs: Any
) -> Optional[_models.ThroughputSettingsGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.ThroughputSettingsGetResults]]
request = build_migrate_sql_container_to_manual_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._migrate_sql_container_to_manual_throughput_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_sql_container_to_manual_throughput_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default/migrateToManualThroughput"} # type: ignore
@distributed_trace_async
async def begin_migrate_sql_container_to_manual_throughput(
self, resource_group_name: str, account_name: str, database_name: str, container_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.ThroughputSettingsGetResults]:
"""Migrate an Azure Cosmos DB SQL container from autoscale to manual throughput.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
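
        Example (an illustrative sketch, not a recorded sample; ``client`` is assumed to be an
        already constructed async ``CosmosDBManagementClient``, built as in the
        ``begin_migrate_sql_container_to_autoscale`` example, with this operations group exposed
        as ``client.sql_resources``):

        .. code-block:: python

            from azure.mgmt.cosmosdb.aio import CosmosDBManagementClient

            async def migrate_back_to_manual(client: CosmosDBManagementClient) -> None:
                poller = await client.sql_resources.begin_migrate_sql_container_to_manual_throughput(
                    resource_group_name="<resource-group>",
                    account_name="<account>",
                    database_name="<database>",
                    container_name="<container>",
                )
                settings = await poller.result()
                # After migration the settings should report a fixed (manual) throughput value.
                print(settings.resource)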
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ThroughputSettingsGetResults]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._migrate_sql_container_to_manual_throughput_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("ThroughputSettingsGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_sql_container_to_manual_throughput.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default/migrateToManualThroughput"} # type: ignore
async def _sql_database_retrieve_throughput_distribution_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
retrieve_throughput_parameters: Union[_models.RetrieveThroughputParameters, IO],
**kwargs: Any
) -> Optional[_models.PhysicalPartitionThroughputInfoResult]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.PhysicalPartitionThroughputInfoResult]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(retrieve_throughput_parameters, (IO, bytes)):
_content = retrieve_throughput_parameters
else:
_json = self._serialize.body(retrieve_throughput_parameters, "RetrieveThroughputParameters")
request = build_sql_database_retrieve_throughput_distribution_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._sql_database_retrieve_throughput_distribution_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("PhysicalPartitionThroughputInfoResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_sql_database_retrieve_throughput_distribution_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default/retrieveThroughputDistribution"} # type: ignore
@overload
async def begin_sql_database_retrieve_throughput_distribution(
self,
resource_group_name: str,
account_name: str,
database_name: str,
retrieve_throughput_parameters: _models.RetrieveThroughputParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PhysicalPartitionThroughputInfoResult]:
"""Retrieve throughput distribution for an Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param retrieve_throughput_parameters: The parameters to provide for retrieving throughput
distribution for the current SQL database. Required.
:type retrieve_throughput_parameters: ~azure.mgmt.cosmosdb.models.RetrieveThroughputParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionThroughputInfoResult or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionThroughputInfoResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_sql_database_retrieve_throughput_distribution(
self,
resource_group_name: str,
account_name: str,
database_name: str,
retrieve_throughput_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PhysicalPartitionThroughputInfoResult]:
"""Retrieve throughput distribution for an Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param retrieve_throughput_parameters: The parameters to provide for retrieving throughput
distribution for the current SQL database. Required.
:type retrieve_throughput_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionThroughputInfoResult or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionThroughputInfoResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_sql_database_retrieve_throughput_distribution(
self,
resource_group_name: str,
account_name: str,
database_name: str,
retrieve_throughput_parameters: Union[_models.RetrieveThroughputParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.PhysicalPartitionThroughputInfoResult]:
"""Retrieve throughput distribution for an Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param retrieve_throughput_parameters: The parameters to provide for retrieving throughput
        distribution for the current SQL database. Is either a model type or an IO type. Required.
:type retrieve_throughput_parameters: ~azure.mgmt.cosmosdb.models.RetrieveThroughputParameters
or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionThroughputInfoResult or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionThroughputInfoResult]
:raises ~azure.core.exceptions.HttpResponseError:
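
        Example (an illustrative sketch, not a recorded sample; ``client`` is assumed to be an
        already constructed async ``CosmosDBManagementClient`` with this operations group
        exposed as ``client.sql_resources``. The nested helper models
        ``RetrieveThroughputPropertiesResource`` and ``PhysicalPartitionId`` are assumptions;
        verify the exact names in ``azure.mgmt.cosmosdb.models`` for your SDK version):

        .. code-block:: python

            from azure.mgmt.cosmosdb import models
            from azure.mgmt.cosmosdb.aio import CosmosDBManagementClient

            async def show_database_throughput_distribution(client: CosmosDBManagementClient) -> None:
                # Placeholder partition ids; list the physical partitions you want to inspect.
                params = models.RetrieveThroughputParameters(
                    resource=models.RetrieveThroughputPropertiesResource(
                        physical_partition_ids=[models.PhysicalPartitionId(id="0")]
                    )
                )
                poller = await client.sql_resources.begin_sql_database_retrieve_throughput_distribution(
                    resource_group_name="<resource-group>",
                    account_name="<account>",
                    database_name="<database>",
                    retrieve_throughput_parameters=params,
                )
                info = await poller.result()
                # ``resource`` holds the per-physical-partition throughput information.
                print(info.resource)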
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.PhysicalPartitionThroughputInfoResult]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._sql_database_retrieve_throughput_distribution_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
retrieve_throughput_parameters=retrieve_throughput_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("PhysicalPartitionThroughputInfoResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_sql_database_retrieve_throughput_distribution.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default/retrieveThroughputDistribution"} # type: ignore
async def _sql_database_redistribute_throughput_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
redistribute_throughput_parameters: Union[_models.RedistributeThroughputParameters, IO],
**kwargs: Any
) -> Optional[_models.PhysicalPartitionThroughputInfoResult]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.PhysicalPartitionThroughputInfoResult]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(redistribute_throughput_parameters, (IO, bytes)):
_content = redistribute_throughput_parameters
else:
_json = self._serialize.body(redistribute_throughput_parameters, "RedistributeThroughputParameters")
request = build_sql_database_redistribute_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._sql_database_redistribute_throughput_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("PhysicalPartitionThroughputInfoResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_sql_database_redistribute_throughput_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default/redistributeThroughput"} # type: ignore
@overload
async def begin_sql_database_redistribute_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
redistribute_throughput_parameters: _models.RedistributeThroughputParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PhysicalPartitionThroughputInfoResult]:
"""Redistribute throughput for an Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param redistribute_throughput_parameters: The parameters to provide for redistributing
throughput for the current SQL database. Required.
:type redistribute_throughput_parameters:
~azure.mgmt.cosmosdb.models.RedistributeThroughputParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionThroughputInfoResult or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionThroughputInfoResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_sql_database_redistribute_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
redistribute_throughput_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PhysicalPartitionThroughputInfoResult]:
"""Redistribute throughput for an Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param redistribute_throughput_parameters: The parameters to provide for redistributing
throughput for the current SQL database. Required.
:type redistribute_throughput_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionThroughputInfoResult or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionThroughputInfoResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_sql_database_redistribute_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
redistribute_throughput_parameters: Union[_models.RedistributeThroughputParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.PhysicalPartitionThroughputInfoResult]:
"""Redistribute throughput for an Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param redistribute_throughput_parameters: The parameters to provide for redistributing
        throughput for the current SQL database. Is either a model type or an IO type. Required.
:type redistribute_throughput_parameters:
~azure.mgmt.cosmosdb.models.RedistributeThroughputParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionThroughputInfoResult or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionThroughputInfoResult]
:raises ~azure.core.exceptions.HttpResponseError:
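
        Example (an illustrative sketch, not a recorded sample; ``client`` is assumed to be an
        already constructed async ``CosmosDBManagementClient`` with this operations group
        exposed as ``client.sql_resources``. The nested helper models
        ``RedistributeThroughputPropertiesResource`` and ``PhysicalPartitionThroughputInfoResource``
        and the ``ThroughputPolicyType`` enum are assumptions; verify the exact names in
        ``azure.mgmt.cosmosdb.models``):

        .. code-block:: python

            from azure.mgmt.cosmosdb import models
            from azure.mgmt.cosmosdb.aio import CosmosDBManagementClient

            async def redistribute_database_throughput(client: CosmosDBManagementClient) -> None:
                # Placeholder partition ids and RU/s values: move throughput from partition "1"
                # onto partition "0" using a custom throughput policy.
                params = models.RedistributeThroughputParameters(
                    resource=models.RedistributeThroughputPropertiesResource(
                        throughput_policy=models.ThroughputPolicyType.CUSTOM,
                        target_physical_partition_throughput_info=[
                            models.PhysicalPartitionThroughputInfoResource(id="0", throughput=5000)
                        ],
                        source_physical_partition_throughput_info=[
                            models.PhysicalPartitionThroughputInfoResource(id="1", throughput=5000)
                        ],
                    )
                )
                poller = await client.sql_resources.begin_sql_database_redistribute_throughput(
                    resource_group_name="<resource-group>",
                    account_name="<account>",
                    database_name="<database>",
                    redistribute_throughput_parameters=params,
                )
                info = await poller.result()
                # ``resource`` holds the resulting per-partition throughput information.
                print(info.resource)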
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.PhysicalPartitionThroughputInfoResult]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._sql_database_redistribute_throughput_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
redistribute_throughput_parameters=redistribute_throughput_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("PhysicalPartitionThroughputInfoResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_sql_database_redistribute_throughput.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default/redistributeThroughput"} # type: ignore
async def _sql_container_retrieve_throughput_distribution_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
retrieve_throughput_parameters: Union[_models.RetrieveThroughputParameters, IO],
**kwargs: Any
) -> Optional[_models.PhysicalPartitionThroughputInfoResult]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.PhysicalPartitionThroughputInfoResult]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(retrieve_throughput_parameters, (IO, bytes)):
_content = retrieve_throughput_parameters
else:
_json = self._serialize.body(retrieve_throughput_parameters, "RetrieveThroughputParameters")
request = build_sql_container_retrieve_throughput_distribution_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._sql_container_retrieve_throughput_distribution_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("PhysicalPartitionThroughputInfoResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_sql_container_retrieve_throughput_distribution_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default/retrieveThroughputDistribution"} # type: ignore
@overload
async def begin_sql_container_retrieve_throughput_distribution(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
retrieve_throughput_parameters: _models.RetrieveThroughputParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PhysicalPartitionThroughputInfoResult]:
"""Retrieve throughput distribution for an Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param retrieve_throughput_parameters: The parameters to provide for retrieving throughput
distribution for the current SQL container. Required.
:type retrieve_throughput_parameters: ~azure.mgmt.cosmosdb.models.RetrieveThroughputParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionThroughputInfoResult or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionThroughputInfoResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_sql_container_retrieve_throughput_distribution(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
retrieve_throughput_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PhysicalPartitionThroughputInfoResult]:
"""Retrieve throughput distribution for an Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param retrieve_throughput_parameters: The parameters to provide for retrieving throughput
distribution for the current SQL container. Required.
:type retrieve_throughput_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionThroughputInfoResult or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionThroughputInfoResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_sql_container_retrieve_throughput_distribution(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
retrieve_throughput_parameters: Union[_models.RetrieveThroughputParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.PhysicalPartitionThroughputInfoResult]:
"""Retrieve throughput distribution for an Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param retrieve_throughput_parameters: The parameters to provide for retrieving throughput
        distribution for the current SQL container. Is either a model type or an IO type. Required.
:type retrieve_throughput_parameters: ~azure.mgmt.cosmosdb.models.RetrieveThroughputParameters
or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionThroughputInfoResult or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionThroughputInfoResult]
:raises ~azure.core.exceptions.HttpResponseError:
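
        Example (an illustrative sketch, not a recorded sample; the same assumptions as the
        ``begin_sql_database_retrieve_throughput_distribution`` example apply — ``client`` is an
        already constructed async ``CosmosDBManagementClient``, the operations group is assumed
        to be ``client.sql_resources``, and ``RetrieveThroughputPropertiesResource`` /
        ``PhysicalPartitionId`` are assumed model names):

        .. code-block:: python

            from azure.mgmt.cosmosdb import models
            from azure.mgmt.cosmosdb.aio import CosmosDBManagementClient

            async def show_container_throughput_distribution(client: CosmosDBManagementClient) -> None:
                # Placeholder partition id; list the physical partitions you want to inspect.
                params = models.RetrieveThroughputParameters(
                    resource=models.RetrieveThroughputPropertiesResource(
                        physical_partition_ids=[models.PhysicalPartitionId(id="0")]
                    )
                )
                poller = await client.sql_resources.begin_sql_container_retrieve_throughput_distribution(
                    resource_group_name="<resource-group>",
                    account_name="<account>",
                    database_name="<database>",
                    container_name="<container>",
                    retrieve_throughput_parameters=params,
                )
                info = await poller.result()
                # ``resource`` holds the per-physical-partition throughput information.
                print(info.resource)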
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.PhysicalPartitionThroughputInfoResult]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._sql_container_retrieve_throughput_distribution_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
retrieve_throughput_parameters=retrieve_throughput_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("PhysicalPartitionThroughputInfoResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_sql_container_retrieve_throughput_distribution.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default/retrieveThroughputDistribution"} # type: ignore
async def _sql_container_redistribute_throughput_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
redistribute_throughput_parameters: Union[_models.RedistributeThroughputParameters, IO],
**kwargs: Any
) -> Optional[_models.PhysicalPartitionThroughputInfoResult]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.PhysicalPartitionThroughputInfoResult]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(redistribute_throughput_parameters, (IO, bytes)):
_content = redistribute_throughput_parameters
else:
_json = self._serialize.body(redistribute_throughput_parameters, "RedistributeThroughputParameters")
request = build_sql_container_redistribute_throughput_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._sql_container_redistribute_throughput_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("PhysicalPartitionThroughputInfoResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_sql_container_redistribute_throughput_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default/redistributeThroughput"} # type: ignore
@overload
async def begin_sql_container_redistribute_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
redistribute_throughput_parameters: _models.RedistributeThroughputParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PhysicalPartitionThroughputInfoResult]:
"""Redistribute throughput for an Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param redistribute_throughput_parameters: The parameters to provide for redistributing
throughput for the current SQL container. Required.
:type redistribute_throughput_parameters:
~azure.mgmt.cosmosdb.models.RedistributeThroughputParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionThroughputInfoResult or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionThroughputInfoResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_sql_container_redistribute_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
redistribute_throughput_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PhysicalPartitionThroughputInfoResult]:
"""Redistribute throughput for an Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param redistribute_throughput_parameters: The parameters to provide for redistributing
throughput for the current SQL container. Required.
:type redistribute_throughput_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionThroughputInfoResult or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionThroughputInfoResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_sql_container_redistribute_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
redistribute_throughput_parameters: Union[_models.RedistributeThroughputParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.PhysicalPartitionThroughputInfoResult]:
"""Redistribute throughput for an Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param redistribute_throughput_parameters: The parameters to provide for redistributing
throughput for the current SQL container. Is either a model type or an IO type. Required.
:type redistribute_throughput_parameters:
~azure.mgmt.cosmosdb.models.RedistributeThroughputParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionThroughputInfoResult or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionThroughputInfoResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.PhysicalPartitionThroughputInfoResult]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._sql_container_redistribute_throughput_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
redistribute_throughput_parameters=redistribute_throughput_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("PhysicalPartitionThroughputInfoResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_sql_container_redistribute_throughput.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default/redistributeThroughput"} # type: ignore
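# Hedged usage sketch (not part of the generated client): how a caller might drive this
# long-running operation. Assumes the async CosmosDBManagementClient from
# azure.mgmt.cosmosdb.aio, DefaultAzureCredential from azure.identity.aio, and the
# sql_resources operations group; resource names and the RedistributeThroughputParameters
# fields are placeholders only.
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.cosmosdb.aio import CosmosDBManagementClient
#     from azure.mgmt.cosmosdb import models
#
#     async def redistribute(subscription_id: str) -> None:
#         async with DefaultAzureCredential() as credential:
#             async with CosmosDBManagementClient(credential, subscription_id) as client:
#                 params = models.RedistributeThroughputParameters(resource=...)  # fields elided
#                 poller = await client.sql_resources.begin_sql_container_redistribute_throughput(
#                     resource_group_name="my-rg",
#                     account_name="my-account",
#                     database_name="my-db",
#                     container_name="my-container",
#                     redistribute_throughput_parameters=params,
#                 )
#                 result = await poller.result()  # PhysicalPartitionThroughputInfoResult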
@distributed_trace
def list_sql_stored_procedures(
self, resource_group_name: str, account_name: str, database_name: str, container_name: str, **kwargs: Any
) -> AsyncIterable["_models.SqlStoredProcedureGetResults"]:
"""Lists the SQL storedProcedure under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either SqlStoredProcedureGetResults or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.SqlStoredProcedureGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlStoredProcedureListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_sql_stored_procedures_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_sql_stored_procedures.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SqlStoredProcedureListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_sql_stored_procedures.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/storedProcedures"} # type: ignore
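# Hedged usage sketch for the paged listing (illustrative only; `client` is assumed to be an
# azure.mgmt.cosmosdb.aio.CosmosDBManagementClient as in the sketch above, and the resource
# names are placeholders):
#
#     async def print_stored_procedures(client) -> None:
#         async for sproc in client.sql_resources.list_sql_stored_procedures(
#             resource_group_name="my-rg",
#             account_name="my-account",
#             database_name="my-db",
#             container_name="my-container",
#         ):
#             print(sproc.name)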
@distributed_trace_async
async def get_sql_stored_procedure(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
stored_procedure_name: str,
**kwargs: Any
) -> _models.SqlStoredProcedureGetResults:
"""Gets the SQL storedProcedure under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param stored_procedure_name: Cosmos DB storedProcedure name. Required.
:type stored_procedure_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SqlStoredProcedureGetResults or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.SqlStoredProcedureGetResults
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlStoredProcedureGetResults]
request = build_get_sql_stored_procedure_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
stored_procedure_name=stored_procedure_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_sql_stored_procedure.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("SqlStoredProcedureGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_stored_procedure.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/storedProcedures/{storedProcedureName}"} # type: ignore
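# Hedged usage sketch for the point read (placeholders throughout; `client` assumed as above):
#
#     sproc = await client.sql_resources.get_sql_stored_procedure(
#         resource_group_name="my-rg",
#         account_name="my-account",
#         database_name="my-db",
#         container_name="my-container",
#         stored_procedure_name="my-sproc",
#     )
#     print(sproc.id)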
async def _create_update_sql_stored_procedure_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
stored_procedure_name: str,
create_update_sql_stored_procedure_parameters: Union[_models.SqlStoredProcedureCreateUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.SqlStoredProcedureGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.SqlStoredProcedureGetResults]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(create_update_sql_stored_procedure_parameters, (IO, bytes)):
_content = create_update_sql_stored_procedure_parameters
else:
_json = self._serialize.body(
create_update_sql_stored_procedure_parameters, "SqlStoredProcedureCreateUpdateParameters"
)
request = build_create_update_sql_stored_procedure_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
stored_procedure_name=stored_procedure_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_update_sql_stored_procedure_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("SqlStoredProcedureGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_sql_stored_procedure_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/storedProcedures/{storedProcedureName}"} # type: ignore
@overload
async def begin_create_update_sql_stored_procedure(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
stored_procedure_name: str,
create_update_sql_stored_procedure_parameters: _models.SqlStoredProcedureCreateUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.SqlStoredProcedureGetResults]:
"""Create or update an Azure Cosmos DB SQL storedProcedure.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param stored_procedure_name: Cosmos DB storedProcedure name. Required.
:type stored_procedure_name: str
:param create_update_sql_stored_procedure_parameters: The parameters to provide for the current
SQL storedProcedure. Required.
:type create_update_sql_stored_procedure_parameters:
~azure.mgmt.cosmosdb.models.SqlStoredProcedureCreateUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlStoredProcedureGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlStoredProcedureGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_update_sql_stored_procedure(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
stored_procedure_name: str,
create_update_sql_stored_procedure_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.SqlStoredProcedureGetResults]:
"""Create or update an Azure Cosmos DB SQL storedProcedure.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param stored_procedure_name: Cosmos DB storedProcedure name. Required.
:type stored_procedure_name: str
:param create_update_sql_stored_procedure_parameters: The parameters to provide for the current
SQL storedProcedure. Required.
:type create_update_sql_stored_procedure_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlStoredProcedureGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlStoredProcedureGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_update_sql_stored_procedure(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
stored_procedure_name: str,
create_update_sql_stored_procedure_parameters: Union[_models.SqlStoredProcedureCreateUpdateParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.SqlStoredProcedureGetResults]:
"""Create or update an Azure Cosmos DB SQL storedProcedure.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param stored_procedure_name: Cosmos DB storedProcedure name. Required.
:type stored_procedure_name: str
:param create_update_sql_stored_procedure_parameters: The parameters to provide for the current
SQL storedProcedure. Is either a model type or an IO type. Required.
:type create_update_sql_stored_procedure_parameters:
~azure.mgmt.cosmosdb.models.SqlStoredProcedureCreateUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlStoredProcedureGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlStoredProcedureGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlStoredProcedureGetResults]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_update_sql_stored_procedure_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
stored_procedure_name=stored_procedure_name,
create_update_sql_stored_procedure_parameters=create_update_sql_stored_procedure_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("SqlStoredProcedureGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_sql_stored_procedure.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/storedProcedures/{storedProcedureName}"} # type: ignore
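# Hedged usage sketch for create/update (illustrative; `client` and `models` assumed as in the
# first sketch above, and the stored procedure body is a placeholder rather than working code):
#
#     params = models.SqlStoredProcedureCreateUpdateParameters(
#         resource=models.SqlStoredProcedureResource(id="my-sproc", body="function () { }"),
#     )
#     poller = await client.sql_resources.begin_create_update_sql_stored_procedure(
#         resource_group_name="my-rg",
#         account_name="my-account",
#         database_name="my-db",
#         container_name="my-container",
#         stored_procedure_name="my-sproc",
#         create_update_sql_stored_procedure_parameters=params,
#     )
#     sproc = await poller.result()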
async def _delete_sql_stored_procedure_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
stored_procedure_name: str,
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_sql_stored_procedure_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
stored_procedure_name=stored_procedure_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_sql_stored_procedure_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_sql_stored_procedure_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/storedProcedures/{storedProcedureName}"} # type: ignore
@distributed_trace_async
async def begin_delete_sql_stored_procedure(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
stored_procedure_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an existing Azure Cosmos DB SQL storedProcedure.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param stored_procedure_name: Cosmos DB storedProcedure name. Required.
:type stored_procedure_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_sql_stored_procedure_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
stored_procedure_name=stored_procedure_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_sql_stored_procedure.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/storedProcedures/{storedProcedureName}"} # type: ignore
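# Hedged usage sketch for deletion (placeholders; `client` assumed as above). The poller
# resolves to None once the delete completes:
#
#     poller = await client.sql_resources.begin_delete_sql_stored_procedure(
#         resource_group_name="my-rg",
#         account_name="my-account",
#         database_name="my-db",
#         container_name="my-container",
#         stored_procedure_name="my-sproc",
#     )
#     await poller.result()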
@distributed_trace
def list_sql_user_defined_functions(
self, resource_group_name: str, account_name: str, database_name: str, container_name: str, **kwargs: Any
) -> AsyncIterable["_models.SqlUserDefinedFunctionGetResults"]:
"""Lists the SQL userDefinedFunction under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either SqlUserDefinedFunctionGetResults or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.SqlUserDefinedFunctionGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlUserDefinedFunctionListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_sql_user_defined_functions_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_sql_user_defined_functions.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SqlUserDefinedFunctionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_sql_user_defined_functions.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/userDefinedFunctions"} # type: ignore
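# Hedged usage sketch, mirroring the stored-procedure listing above (placeholders; `client`
# assumed as above):
#
#     async for udf in client.sql_resources.list_sql_user_defined_functions(
#         resource_group_name="my-rg",
#         account_name="my-account",
#         database_name="my-db",
#         container_name="my-container",
#     ):
#         print(udf.name)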
@distributed_trace_async
async def get_sql_user_defined_function(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
user_defined_function_name: str,
**kwargs: Any
) -> _models.SqlUserDefinedFunctionGetResults:
"""Gets the SQL userDefinedFunction under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param user_defined_function_name: Cosmos DB userDefinedFunction name. Required.
:type user_defined_function_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SqlUserDefinedFunctionGetResults or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.SqlUserDefinedFunctionGetResults
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlUserDefinedFunctionGetResults]
request = build_get_sql_user_defined_function_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
user_defined_function_name=user_defined_function_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_sql_user_defined_function.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("SqlUserDefinedFunctionGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_user_defined_function.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/userDefinedFunctions/{userDefinedFunctionName}"} # type: ignore
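# Hedged usage sketch for the point read (placeholders; `client` assumed as above):
#
#     udf = await client.sql_resources.get_sql_user_defined_function(
#         resource_group_name="my-rg",
#         account_name="my-account",
#         database_name="my-db",
#         container_name="my-container",
#         user_defined_function_name="my-udf",
#     )
#     print(udf.id)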
async def _create_update_sql_user_defined_function_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
user_defined_function_name: str,
create_update_sql_user_defined_function_parameters: Union[
_models.SqlUserDefinedFunctionCreateUpdateParameters, IO
],
**kwargs: Any
) -> Optional[_models.SqlUserDefinedFunctionGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.SqlUserDefinedFunctionGetResults]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(create_update_sql_user_defined_function_parameters, (IO, bytes)):
_content = create_update_sql_user_defined_function_parameters
else:
_json = self._serialize.body(
create_update_sql_user_defined_function_parameters, "SqlUserDefinedFunctionCreateUpdateParameters"
)
request = build_create_update_sql_user_defined_function_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
user_defined_function_name=user_defined_function_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_update_sql_user_defined_function_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("SqlUserDefinedFunctionGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_sql_user_defined_function_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/userDefinedFunctions/{userDefinedFunctionName}"} # type: ignore
@overload
async def begin_create_update_sql_user_defined_function(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
user_defined_function_name: str,
create_update_sql_user_defined_function_parameters: _models.SqlUserDefinedFunctionCreateUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.SqlUserDefinedFunctionGetResults]:
"""Create or update an Azure Cosmos DB SQL userDefinedFunction.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param user_defined_function_name: Cosmos DB userDefinedFunction name. Required.
:type user_defined_function_name: str
:param create_update_sql_user_defined_function_parameters: The parameters to provide for the
current SQL userDefinedFunction. Required.
:type create_update_sql_user_defined_function_parameters:
~azure.mgmt.cosmosdb.models.SqlUserDefinedFunctionCreateUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlUserDefinedFunctionGetResults or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlUserDefinedFunctionGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_update_sql_user_defined_function(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
user_defined_function_name: str,
create_update_sql_user_defined_function_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.SqlUserDefinedFunctionGetResults]:
"""Create or update an Azure Cosmos DB SQL userDefinedFunction.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param user_defined_function_name: Cosmos DB userDefinedFunction name. Required.
:type user_defined_function_name: str
:param create_update_sql_user_defined_function_parameters: The parameters to provide for the
current SQL userDefinedFunction. Required.
:type create_update_sql_user_defined_function_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlUserDefinedFunctionGetResults or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlUserDefinedFunctionGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_update_sql_user_defined_function(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
user_defined_function_name: str,
create_update_sql_user_defined_function_parameters: Union[
_models.SqlUserDefinedFunctionCreateUpdateParameters, IO
],
**kwargs: Any
) -> AsyncLROPoller[_models.SqlUserDefinedFunctionGetResults]:
"""Create or update an Azure Cosmos DB SQL userDefinedFunction.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param user_defined_function_name: Cosmos DB userDefinedFunction name. Required.
:type user_defined_function_name: str
:param create_update_sql_user_defined_function_parameters: The parameters to provide for the
current SQL userDefinedFunction. Is either a model type or an IO type. Required.
:type create_update_sql_user_defined_function_parameters:
~azure.mgmt.cosmosdb.models.SqlUserDefinedFunctionCreateUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlUserDefinedFunctionGetResults or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlUserDefinedFunctionGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlUserDefinedFunctionGetResults]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_update_sql_user_defined_function_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
user_defined_function_name=user_defined_function_name,
create_update_sql_user_defined_function_parameters=create_update_sql_user_defined_function_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("SqlUserDefinedFunctionGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_sql_user_defined_function.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/userDefinedFunctions/{userDefinedFunctionName}"} # type: ignore
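# Hedged usage sketch for create/update of a userDefinedFunction (placeholders; `client` and
# `models` assumed as above; the function body shown is illustrative only):
#
#     params = models.SqlUserDefinedFunctionCreateUpdateParameters(
#         resource=models.SqlUserDefinedFunctionResource(id="my-udf", body="function () { }"),
#     )
#     poller = await client.sql_resources.begin_create_update_sql_user_defined_function(
#         resource_group_name="my-rg",
#         account_name="my-account",
#         database_name="my-db",
#         container_name="my-container",
#         user_defined_function_name="my-udf",
#         create_update_sql_user_defined_function_parameters=params,
#     )
#     udf = await poller.result()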
async def _delete_sql_user_defined_function_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
user_defined_function_name: str,
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_sql_user_defined_function_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
user_defined_function_name=user_defined_function_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_sql_user_defined_function_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_sql_user_defined_function_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/userDefinedFunctions/{userDefinedFunctionName}"} # type: ignore
@distributed_trace_async
async def begin_delete_sql_user_defined_function(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
user_defined_function_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an existing Azure Cosmos DB SQL userDefinedFunction.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param user_defined_function_name: Cosmos DB userDefinedFunction name. Required.
:type user_defined_function_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_sql_user_defined_function_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
user_defined_function_name=user_defined_function_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_sql_user_defined_function.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/userDefinedFunctions/{userDefinedFunctionName}"} # type: ignore
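# Hedged usage sketch (placeholders; `client` assumed as above). The poller resolves to None
# once the delete completes:
#
#     poller = await client.sql_resources.begin_delete_sql_user_defined_function(
#         resource_group_name="my-rg",
#         account_name="my-account",
#         database_name="my-db",
#         container_name="my-container",
#         user_defined_function_name="my-udf",
#     )
#     await poller.result()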
@distributed_trace
def list_sql_triggers(
self, resource_group_name: str, account_name: str, database_name: str, container_name: str, **kwargs: Any
) -> AsyncIterable["_models.SqlTriggerGetResults"]:
"""Lists the SQL trigger under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either SqlTriggerGetResults or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.SqlTriggerGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlTriggerListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_sql_triggers_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_sql_triggers.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
                # Make the call to the next link with the client's api-version.
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SqlTriggerListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_sql_triggers.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/triggers"} # type: ignore
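    # Usage sketch for list_sql_triggers (kept as a comment so nothing executes at class
    # definition time). It assumes this operations class is exposed on the async management
    # client as ``client.sql_resources`` and uses placeholder resource names.
    #
    #   async for trigger in client.sql_resources.list_sql_triggers(
    #       resource_group_name="my-rg",
    #       account_name="my-cosmos-account",
    #       database_name="my-db",
    #       container_name="my-container",
    #   ):
    #       print(trigger.name)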
@distributed_trace_async
async def get_sql_trigger(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
trigger_name: str,
**kwargs: Any
) -> _models.SqlTriggerGetResults:
"""Gets the SQL trigger under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param trigger_name: Cosmos DB trigger name. Required.
:type trigger_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SqlTriggerGetResults or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.SqlTriggerGetResults
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlTriggerGetResults]
request = build_get_sql_trigger_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
trigger_name=trigger_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_sql_trigger.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("SqlTriggerGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_trigger.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/triggers/{triggerName}"} # type: ignore
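    # Usage sketch for get_sql_trigger (comment only; same ``client.sql_resources`` and
    # placeholder-name assumptions as the sketch above).
    #
    #   trigger = await client.sql_resources.get_sql_trigger(
    #       resource_group_name="my-rg",
    #       account_name="my-cosmos-account",
    #       database_name="my-db",
    #       container_name="my-container",
    #       trigger_name="my-trigger",
    #   )
    #   print(trigger.id)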
async def _create_update_sql_trigger_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
trigger_name: str,
create_update_sql_trigger_parameters: Union[_models.SqlTriggerCreateUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.SqlTriggerGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.SqlTriggerGetResults]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(create_update_sql_trigger_parameters, (IO, bytes)):
_content = create_update_sql_trigger_parameters
else:
_json = self._serialize.body(create_update_sql_trigger_parameters, "SqlTriggerCreateUpdateParameters")
request = build_create_update_sql_trigger_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
trigger_name=trigger_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_update_sql_trigger_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("SqlTriggerGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_sql_trigger_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/triggers/{triggerName}"} # type: ignore
@overload
async def begin_create_update_sql_trigger(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
trigger_name: str,
create_update_sql_trigger_parameters: _models.SqlTriggerCreateUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.SqlTriggerGetResults]:
"""Create or update an Azure Cosmos DB SQL trigger.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param trigger_name: Cosmos DB trigger name. Required.
:type trigger_name: str
:param create_update_sql_trigger_parameters: The parameters to provide for the current SQL
trigger. Required.
:type create_update_sql_trigger_parameters:
~azure.mgmt.cosmosdb.models.SqlTriggerCreateUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlTriggerGetResults or the result
of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlTriggerGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_update_sql_trigger(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
trigger_name: str,
create_update_sql_trigger_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.SqlTriggerGetResults]:
"""Create or update an Azure Cosmos DB SQL trigger.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param trigger_name: Cosmos DB trigger name. Required.
:type trigger_name: str
:param create_update_sql_trigger_parameters: The parameters to provide for the current SQL
trigger. Required.
:type create_update_sql_trigger_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlTriggerGetResults or the result
of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlTriggerGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_update_sql_trigger(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
trigger_name: str,
create_update_sql_trigger_parameters: Union[_models.SqlTriggerCreateUpdateParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.SqlTriggerGetResults]:
"""Create or update an Azure Cosmos DB SQL trigger.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param trigger_name: Cosmos DB trigger name. Required.
:type trigger_name: str
:param create_update_sql_trigger_parameters: The parameters to provide for the current SQL
         trigger. Is either a model type or an IO type. Required.
:type create_update_sql_trigger_parameters:
~azure.mgmt.cosmosdb.models.SqlTriggerCreateUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlTriggerGetResults or the result
of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlTriggerGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlTriggerGetResults]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_update_sql_trigger_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
trigger_name=trigger_name,
create_update_sql_trigger_parameters=create_update_sql_trigger_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("SqlTriggerGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_sql_trigger.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/triggers/{triggerName}"} # type: ignore
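    # Usage sketch for begin_create_update_sql_trigger (comment only). The
    # ``SqlTriggerResource`` field values are illustrative assumptions; check the models
    # module for the full parameter shape before relying on them.
    #
    #   poller = await client.sql_resources.begin_create_update_sql_trigger(
    #       resource_group_name="my-rg",
    #       account_name="my-cosmos-account",
    #       database_name="my-db",
    #       container_name="my-container",
    #       trigger_name="my-trigger",
    #       create_update_sql_trigger_parameters=_models.SqlTriggerCreateUpdateParameters(
    #           resource=_models.SqlTriggerResource(id="my-trigger"),
    #       ),
    #   )
    #   created = await poller.result()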
async def _delete_sql_trigger_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
trigger_name: str,
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_sql_trigger_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
trigger_name=trigger_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_sql_trigger_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_sql_trigger_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/triggers/{triggerName}"} # type: ignore
@distributed_trace_async
async def begin_delete_sql_trigger(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
trigger_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an existing Azure Cosmos DB SQL trigger.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param trigger_name: Cosmos DB trigger name. Required.
:type trigger_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_sql_trigger_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
trigger_name=trigger_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_sql_trigger.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/triggers/{triggerName}"} # type: ignore
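    # Usage sketch for begin_delete_sql_trigger (comment only; placeholder names). The
    # optional ``polling_interval`` keyword overrides the default delay between polls when
    # the service sends no Retry-After header.
    #
    #   poller = await client.sql_resources.begin_delete_sql_trigger(
    #       resource_group_name="my-rg",
    #       account_name="my-cosmos-account",
    #       database_name="my-db",
    #       container_name="my-container",
    #       trigger_name="my-trigger",
    #       polling_interval=5,
    #   )
    #   await poller.wait()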
@distributed_trace_async
async def get_sql_role_definition(
self, role_definition_id: str, resource_group_name: str, account_name: str, **kwargs: Any
) -> _models.SqlRoleDefinitionGetResults:
"""Retrieves the properties of an existing Azure Cosmos DB SQL Role Definition with the given Id.
:param role_definition_id: The GUID for the Role Definition. Required.
:type role_definition_id: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SqlRoleDefinitionGetResults or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.SqlRoleDefinitionGetResults
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlRoleDefinitionGetResults]
request = build_get_sql_role_definition_request(
role_definition_id=role_definition_id,
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_sql_role_definition.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("SqlRoleDefinitionGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_role_definition.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleDefinitions/{roleDefinitionId}"} # type: ignore
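    # Usage sketch for get_sql_role_definition (comment only; the GUID below is a
    # placeholder role definition id).
    #
    #   role_definition = await client.sql_resources.get_sql_role_definition(
    #       role_definition_id="00000000-0000-0000-0000-000000000001",
    #       resource_group_name="my-rg",
    #       account_name="my-cosmos-account",
    #   )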
async def _create_update_sql_role_definition_initial(
self,
role_definition_id: str,
resource_group_name: str,
account_name: str,
create_update_sql_role_definition_parameters: Union[_models.SqlRoleDefinitionCreateUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.SqlRoleDefinitionGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.SqlRoleDefinitionGetResults]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(create_update_sql_role_definition_parameters, (IO, bytes)):
_content = create_update_sql_role_definition_parameters
else:
_json = self._serialize.body(
create_update_sql_role_definition_parameters, "SqlRoleDefinitionCreateUpdateParameters"
)
request = build_create_update_sql_role_definition_request(
role_definition_id=role_definition_id,
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_update_sql_role_definition_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("SqlRoleDefinitionGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_sql_role_definition_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleDefinitions/{roleDefinitionId}"} # type: ignore
@overload
async def begin_create_update_sql_role_definition(
self,
role_definition_id: str,
resource_group_name: str,
account_name: str,
create_update_sql_role_definition_parameters: _models.SqlRoleDefinitionCreateUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.SqlRoleDefinitionGetResults]:
"""Creates or updates an Azure Cosmos DB SQL Role Definition.
:param role_definition_id: The GUID for the Role Definition. Required.
:type role_definition_id: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param create_update_sql_role_definition_parameters: The properties required to create or
update a Role Definition. Required.
:type create_update_sql_role_definition_parameters:
~azure.mgmt.cosmosdb.models.SqlRoleDefinitionCreateUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlRoleDefinitionGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlRoleDefinitionGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_update_sql_role_definition(
self,
role_definition_id: str,
resource_group_name: str,
account_name: str,
create_update_sql_role_definition_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.SqlRoleDefinitionGetResults]:
"""Creates or updates an Azure Cosmos DB SQL Role Definition.
:param role_definition_id: The GUID for the Role Definition. Required.
:type role_definition_id: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param create_update_sql_role_definition_parameters: The properties required to create or
update a Role Definition. Required.
:type create_update_sql_role_definition_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlRoleDefinitionGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlRoleDefinitionGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_update_sql_role_definition(
self,
role_definition_id: str,
resource_group_name: str,
account_name: str,
create_update_sql_role_definition_parameters: Union[_models.SqlRoleDefinitionCreateUpdateParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.SqlRoleDefinitionGetResults]:
"""Creates or updates an Azure Cosmos DB SQL Role Definition.
:param role_definition_id: The GUID for the Role Definition. Required.
:type role_definition_id: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param create_update_sql_role_definition_parameters: The properties required to create or
         update a Role Definition. Is either a model type or an IO type. Required.
:type create_update_sql_role_definition_parameters:
~azure.mgmt.cosmosdb.models.SqlRoleDefinitionCreateUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlRoleDefinitionGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlRoleDefinitionGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlRoleDefinitionGetResults]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_update_sql_role_definition_initial( # type: ignore
role_definition_id=role_definition_id,
resource_group_name=resource_group_name,
account_name=account_name,
create_update_sql_role_definition_parameters=create_update_sql_role_definition_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("SqlRoleDefinitionGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_sql_role_definition.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleDefinitions/{roleDefinitionId}"} # type: ignore
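    # Usage sketch for begin_create_update_sql_role_definition (comment only). The model
    # field names and the sample data action are assumptions drawn from the Cosmos DB RBAC
    # surface; verify them against the models module. Angle-bracket values are placeholders.
    #
    #   poller = await client.sql_resources.begin_create_update_sql_role_definition(
    #       role_definition_id="00000000-0000-0000-0000-000000000001",
    #       resource_group_name="my-rg",
    #       account_name="my-cosmos-account",
    #       create_update_sql_role_definition_parameters=_models.SqlRoleDefinitionCreateUpdateParameters(
    #           role_name="ReadMetadataOnly",
    #           assignable_scopes=["<database-account-arm-id>"],
    #           permissions=[_models.Permission(data_actions=[
    #               "Microsoft.DocumentDB/databaseAccounts/readMetadata",
    #           ])],
    #       ),
    #   )
    #   role_definition = await poller.result()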
async def _delete_sql_role_definition_initial( # pylint: disable=inconsistent-return-statements
self, role_definition_id: str, resource_group_name: str, account_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_sql_role_definition_request(
role_definition_id=role_definition_id,
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_sql_role_definition_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_sql_role_definition_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleDefinitions/{roleDefinitionId}"} # type: ignore
@distributed_trace_async
async def begin_delete_sql_role_definition(
self, role_definition_id: str, resource_group_name: str, account_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an existing Azure Cosmos DB SQL Role Definition.
:param role_definition_id: The GUID for the Role Definition. Required.
:type role_definition_id: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_sql_role_definition_initial( # type: ignore
role_definition_id=role_definition_id,
resource_group_name=resource_group_name,
account_name=account_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_sql_role_definition.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleDefinitions/{roleDefinitionId}"} # type: ignore
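    # Usage sketch for begin_delete_sql_role_definition (comment only; placeholder ids).
    # The poller's ``continuation_token()`` can be persisted and handed back later via the
    # ``continuation_token`` keyword to resume polling.
    #
    #   poller = await client.sql_resources.begin_delete_sql_role_definition(
    #       role_definition_id="00000000-0000-0000-0000-000000000001",
    #       resource_group_name="my-rg",
    #       account_name="my-cosmos-account",
    #   )
    #   token = poller.continuation_token()
    #   await poller.result()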
@distributed_trace
def list_sql_role_definitions(
self, resource_group_name: str, account_name: str, **kwargs: Any
) -> AsyncIterable["_models.SqlRoleDefinitionGetResults"]:
"""Retrieves the list of all Azure Cosmos DB SQL Role Definitions.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SqlRoleDefinitionGetResults or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.SqlRoleDefinitionGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlRoleDefinitionListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_sql_role_definitions_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_sql_role_definitions.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
                # Make the call to the next link with the client's api-version.
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SqlRoleDefinitionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_sql_role_definitions.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleDefinitions"} # type: ignore
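    # Usage sketch for list_sql_role_definitions (comment only; same ``client.sql_resources``
    # assumption as the earlier sketches).
    #
    #   role_definitions = [
    #       rd async for rd in client.sql_resources.list_sql_role_definitions(
    #           resource_group_name="my-rg", account_name="my-cosmos-account"
    #       )
    #   ]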
@distributed_trace_async
async def get_sql_role_assignment(
self, role_assignment_id: str, resource_group_name: str, account_name: str, **kwargs: Any
) -> _models.SqlRoleAssignmentGetResults:
"""Retrieves the properties of an existing Azure Cosmos DB SQL Role Assignment with the given Id.
:param role_assignment_id: The GUID for the Role Assignment. Required.
:type role_assignment_id: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SqlRoleAssignmentGetResults or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.SqlRoleAssignmentGetResults
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlRoleAssignmentGetResults]
request = build_get_sql_role_assignment_request(
role_assignment_id=role_assignment_id,
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_sql_role_assignment.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("SqlRoleAssignmentGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_role_assignment.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleAssignments/{roleAssignmentId}"} # type: ignore
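    # Usage sketch for get_sql_role_assignment (comment only; placeholder GUID and names).
    #
    #   role_assignment = await client.sql_resources.get_sql_role_assignment(
    #       role_assignment_id="00000000-0000-0000-0000-0000000000aa",
    #       resource_group_name="my-rg",
    #       account_name="my-cosmos-account",
    #   )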
async def _create_update_sql_role_assignment_initial(
self,
role_assignment_id: str,
resource_group_name: str,
account_name: str,
create_update_sql_role_assignment_parameters: Union[_models.SqlRoleAssignmentCreateUpdateParameters, IO],
**kwargs: Any
) -> Optional[_models.SqlRoleAssignmentGetResults]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.SqlRoleAssignmentGetResults]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(create_update_sql_role_assignment_parameters, (IO, bytes)):
_content = create_update_sql_role_assignment_parameters
else:
_json = self._serialize.body(
create_update_sql_role_assignment_parameters, "SqlRoleAssignmentCreateUpdateParameters"
)
request = build_create_update_sql_role_assignment_request(
role_assignment_id=role_assignment_id,
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_update_sql_role_assignment_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("SqlRoleAssignmentGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_sql_role_assignment_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleAssignments/{roleAssignmentId}"} # type: ignore
@overload
async def begin_create_update_sql_role_assignment(
self,
role_assignment_id: str,
resource_group_name: str,
account_name: str,
create_update_sql_role_assignment_parameters: _models.SqlRoleAssignmentCreateUpdateParameters,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.SqlRoleAssignmentGetResults]:
"""Creates or updates an Azure Cosmos DB SQL Role Assignment.
:param role_assignment_id: The GUID for the Role Assignment. Required.
:type role_assignment_id: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param create_update_sql_role_assignment_parameters: The properties required to create or
update a Role Assignment. Required.
:type create_update_sql_role_assignment_parameters:
~azure.mgmt.cosmosdb.models.SqlRoleAssignmentCreateUpdateParameters
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlRoleAssignmentGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlRoleAssignmentGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_update_sql_role_assignment(
self,
role_assignment_id: str,
resource_group_name: str,
account_name: str,
create_update_sql_role_assignment_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.SqlRoleAssignmentGetResults]:
"""Creates or updates an Azure Cosmos DB SQL Role Assignment.
:param role_assignment_id: The GUID for the Role Assignment. Required.
:type role_assignment_id: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param create_update_sql_role_assignment_parameters: The properties required to create or
update a Role Assignment. Required.
:type create_update_sql_role_assignment_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlRoleAssignmentGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlRoleAssignmentGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_update_sql_role_assignment(
self,
role_assignment_id: str,
resource_group_name: str,
account_name: str,
create_update_sql_role_assignment_parameters: Union[_models.SqlRoleAssignmentCreateUpdateParameters, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.SqlRoleAssignmentGetResults]:
"""Creates or updates an Azure Cosmos DB SQL Role Assignment.
:param role_assignment_id: The GUID for the Role Assignment. Required.
:type role_assignment_id: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param create_update_sql_role_assignment_parameters: The properties required to create or
         update a Role Assignment. Is either a model type or an IO type. Required.
:type create_update_sql_role_assignment_parameters:
~azure.mgmt.cosmosdb.models.SqlRoleAssignmentCreateUpdateParameters or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlRoleAssignmentGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlRoleAssignmentGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlRoleAssignmentGetResults]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_update_sql_role_assignment_initial( # type: ignore
role_assignment_id=role_assignment_id,
resource_group_name=resource_group_name,
account_name=account_name,
create_update_sql_role_assignment_parameters=create_update_sql_role_assignment_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("SqlRoleAssignmentGetResults", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_sql_role_assignment.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleAssignments/{roleAssignmentId}"} # type: ignore
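    # Usage sketch for begin_create_update_sql_role_assignment (comment only). The
    # parameter fields (``role_definition_id``, ``scope``, ``principal_id``) are
    # assumptions based on the Cosmos DB RBAC model; confirm them against the models
    # module. Angle-bracket values are placeholders.
    #
    #   poller = await client.sql_resources.begin_create_update_sql_role_assignment(
    #       role_assignment_id="00000000-0000-0000-0000-0000000000aa",
    #       resource_group_name="my-rg",
    #       account_name="my-cosmos-account",
    #       create_update_sql_role_assignment_parameters=_models.SqlRoleAssignmentCreateUpdateParameters(
    #           role_definition_id="<sql-role-definition-arm-id>",
    #           scope="<database-account-arm-id>",
    #           principal_id="<aad-object-id>",
    #       ),
    #   )
    #   role_assignment = await poller.result()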
async def _delete_sql_role_assignment_initial( # pylint: disable=inconsistent-return-statements
self, role_assignment_id: str, resource_group_name: str, account_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_sql_role_assignment_request(
role_assignment_id=role_assignment_id,
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_sql_role_assignment_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_sql_role_assignment_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleAssignments/{roleAssignmentId}"} # type: ignore
@distributed_trace_async
async def begin_delete_sql_role_assignment(
self, role_assignment_id: str, resource_group_name: str, account_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an existing Azure Cosmos DB SQL Role Assignment.
:param role_assignment_id: The GUID for the Role Assignment. Required.
:type role_assignment_id: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_sql_role_assignment_initial( # type: ignore
role_assignment_id=role_assignment_id,
resource_group_name=resource_group_name,
account_name=account_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_sql_role_assignment.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleAssignments/{roleAssignmentId}"} # type: ignore
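    # Hedged usage sketch (not part of the generated client): `ops` stands for an
    # authenticated instance of this operations class and every name below is a
    # placeholder. The delete poller carries no body, so awaiting result() simply
    # waits for the long-running operation to finish:
    #
    #     poller = await ops.begin_delete_sql_role_assignment(
    #         "00000000-0000-0000-0000-000000000000", "my-rg", "my-account"
    #     )
    #     await poller.result()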
@distributed_trace
def list_sql_role_assignments(
self, resource_group_name: str, account_name: str, **kwargs: Any
) -> AsyncIterable["_models.SqlRoleAssignmentGetResults"]:
"""Retrieves the list of all Azure Cosmos DB SQL Role Assignments.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SqlRoleAssignmentGetResults or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.SqlRoleAssignmentGetResults]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SqlRoleAssignmentListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_sql_role_assignments_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_sql_role_assignments.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SqlRoleAssignmentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_sql_role_assignments.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleAssignments"} # type: ignore
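    # Hedged usage sketch (not part of the generated client): `ops` stands for an
    # authenticated instance of this operations class and the resource names are
    # placeholders. The method returns an AsyncItemPaged, so it is consumed with
    # `async for` without awaiting the call itself:
    #
    #     async for assignment in ops.list_sql_role_assignments("my-rg", "my-account"):
    #         print(assignment.name)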
async def _retrieve_continuous_backup_information_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
location: Union[_models.ContinuousBackupRestoreLocation, IO],
**kwargs: Any
) -> Optional[_models.BackupInformation]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.BackupInformation]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(location, (IO, bytes)):
_content = location
else:
_json = self._serialize.body(location, "ContinuousBackupRestoreLocation")
request = build_retrieve_continuous_backup_information_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._retrieve_continuous_backup_information_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("BackupInformation", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_retrieve_continuous_backup_information_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/retrieveContinuousBackupInformation"} # type: ignore
@overload
async def begin_retrieve_continuous_backup_information(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
location: _models.ContinuousBackupRestoreLocation,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.BackupInformation]:
"""Retrieves continuous backup information for a container resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param location: The name of the continuous backup restore location. Required.
:type location: ~azure.mgmt.cosmosdb.models.ContinuousBackupRestoreLocation
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BackupInformation or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.BackupInformation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_retrieve_continuous_backup_information(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
location: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.BackupInformation]:
"""Retrieves continuous backup information for a container resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param location: The name of the continuous backup restore location. Required.
:type location: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BackupInformation or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.BackupInformation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_retrieve_continuous_backup_information(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
location: Union[_models.ContinuousBackupRestoreLocation, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.BackupInformation]:
"""Retrieves continuous backup information for a container resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_name: Cosmos DB database name. Required.
:type database_name: str
:param container_name: Cosmos DB container name. Required.
:type container_name: str
:param location: The name of the continuous backup restore location. Is either a model type or
         an IO type. Required.
:type location: ~azure.mgmt.cosmosdb.models.ContinuousBackupRestoreLocation or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BackupInformation or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.BackupInformation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-08-15-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.BackupInformation]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._retrieve_continuous_backup_information_initial( # type: ignore
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
location=location,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("BackupInformation", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_retrieve_continuous_backup_information.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/retrieveContinuousBackupInformation"} # type: ignore
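    # Hedged usage sketch (not part of the generated client): `ops` stands for an
    # authenticated instance of this operations class, the resource names are
    # placeholders, and ContinuousBackupRestoreLocation is assumed to be imported from
    # the package's models module:
    #
    #     poller = await ops.begin_retrieve_continuous_backup_information(
    #         "my-rg", "my-account", "my-db", "my-container",
    #         ContinuousBackupRestoreLocation(location="West US"),
    #     )
    #     backup_info = await poller.result()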
| {
"content_hash": "f7f899c020cc5de55cd64242771d1cac",
"timestamp": "",
"source": "github",
"line_count": 6979,
"max_line_length": 339,
"avg_line_length": 50.180398337870756,
"alnum_prop": 0.6548032746160151,
"repo_name": "Azure/azure-sdk-for-python",
"id": "2f568d744fea6db07d3f8b3bef7fc4d371d8bfb5",
"size": "350709",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/aio/operations/_sql_resources_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from alert import Alert
from level import Level
| {
"content_hash": "a4f4adf793dbc0972e7ee15232b44f9d",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 23,
"avg_line_length": 24,
"alnum_prop": 0.8333333333333334,
"repo_name": "mogproject/easy-alert",
"id": "7448d6882f60fb4881d926c9001ee4535e61eabe",
"size": "48",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/easy_alert/entity/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "570"
},
{
"name": "Python",
"bytes": "98454"
}
],
"symlink_target": ""
} |
import json
from moto.core.responses import BaseResponse
from .models import codepipeline_backends
class CodePipelineResponse(BaseResponse):
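    """Maps mocked CodePipeline API requests onto the backend for the request's region."""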
@property
def codepipeline_backend(self):
return codepipeline_backends[self.region]
def create_pipeline(self):
pipeline, tags = self.codepipeline_backend.create_pipeline(
self.region, self._get_param("pipeline"), self._get_param("tags")
)
return json.dumps({"pipeline": pipeline, "tags": tags})
def get_pipeline(self):
pipeline, metadata = self.codepipeline_backend.get_pipeline(
self._get_param("name")
)
return json.dumps({"pipeline": pipeline, "metadata": metadata})
def update_pipeline(self):
pipeline = self.codepipeline_backend.update_pipeline(
self._get_param("pipeline")
)
return json.dumps({"pipeline": pipeline})
def list_pipelines(self):
pipelines = self.codepipeline_backend.list_pipelines()
return json.dumps({"pipelines": pipelines})
def delete_pipeline(self):
self.codepipeline_backend.delete_pipeline(self._get_param("name"))
return ""
def list_tags_for_resource(self):
tags = self.codepipeline_backend.list_tags_for_resource(
self._get_param("resourceArn")
)
return json.dumps({"tags": tags})
def tag_resource(self):
self.codepipeline_backend.tag_resource(
self._get_param("resourceArn"), self._get_param("tags")
)
return ""
def untag_resource(self):
self.codepipeline_backend.untag_resource(
self._get_param("resourceArn"), self._get_param("tagKeys")
)
return ""
| {
"content_hash": "f40f720a25bf786e89a21aa19dd3098b",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 77,
"avg_line_length": 28.016129032258064,
"alnum_prop": 0.6286701208981001,
"repo_name": "william-richard/moto",
"id": "0223dfae601979d480d0bfe94f629260ce1a3fb6",
"size": "1737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto/codepipeline/responses.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "6637538"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
} |
from AestheticsHelper import GetFeatures
from PicSift import app
import os
from threading import Lock
import numpy as np
from collections import defaultdict
import pickle
from PIL import Image
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
import time
# load regression
#lr = pickle.load(open("logres.p","rb"))
lr = pickle.load(open("logres_20150208.p","rb"))
#print lr
#stdsc = pickle.load(open("stdsc.p","rb"))
def classify_image(image):
#print image.filename
#
# Classifies the image using logistic regression
# Returns the classification and image_url
#
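    # Classification codes returned below: -2 = feature extraction failed, -1 = a feature
    # came back NaN; otherwise the logistic-regression label plus its probability score.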
image_path = image.filename
image_data = np.array(Image.open(image.stream))
try:
t = time.time()
features = GetFeatures(image_data)
print "Took %.2f s to process" % (time.time() - t)
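        # Log-scale the Laplacian-variance (sharpness) features, presumably to match the
        # scaling the logistic-regression model was trained with.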
features["LapVar_Hue"] = np.log(features["LapVar_Hue"])
features["LapVar_Saturation"] = np.log(features["LapVar_Saturation"])
features["LapVar_Value"] = np.log(features["LapVar_Value"])
    except Exception:
classification = -2
return {"classification": classification,"image_url": image_path}
if np.any([np.isnan(features[k]) for k in features.keys()]):
classification = -1
return {"classification": classification,"image_url": image_path}
else:
features.pop("isGray",None)
X = [features[k] for k in sorted(features.keys())]
#print X
#inputfeatures = stdsc.transform(X)
inputfeatures = X
#print inputfeatures
classification = lr.predict(inputfeatures)[0]
classification_score = lr.predict_proba(inputfeatures)[0][1]
#print classification
return {"classification": classification,
"classification_score": classification_score,
"feature_keys": sorted(features.keys()),
"feature_vals": inputfeatures,
"image_url": image_path}
| {
"content_hash": "4e1df80ab850897f5d12ed6fc1a3d29e",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 77,
"avg_line_length": 35.53333333333333,
"alnum_prop": 0.650562851782364,
"repo_name": "kumbhani/PicSift",
"id": "501e960f2c685dfa6e74e89c6d6d7db0f55c756b",
"size": "2132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PicSift/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8303"
},
{
"name": "HTML",
"bytes": "3941"
},
{
"name": "JavaScript",
"bytes": "115378"
},
{
"name": "Jupyter Notebook",
"bytes": "259039"
},
{
"name": "OpenEdge ABL",
"bytes": "6782"
},
{
"name": "Python",
"bytes": "46472"
}
],
"symlink_target": ""
} |
from os.path import abspath, dirname, join
import pandas as pd
import pytest
from histogrammar.dfinterface.spark_histogrammar import SparkHistogrammar
from histogrammar.dfinterface.make_histograms import make_histograms
try:
from pyspark.sql import SparkSession
from pyspark import __version__ as pyspark_version
spark_found = True
except (ModuleNotFoundError, AttributeError):
spark_found = False
def get_spark():
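    """Build a local SparkSession with the Histogrammar JARs on the classpath, or return None if pyspark is unavailable."""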
if not spark_found:
return None
current_path = dirname(abspath(__file__))
scala = '2.12' if int(pyspark_version[0]) >= 3 else '2.11'
hist_spark_jar = join(current_path, f"jars/histogrammar-sparksql_{scala}-1.0.20.jar")
hist_jar = join(current_path, f"jars/histogrammar_{scala}-1.0.20.jar")
spark = (
SparkSession.builder.master("local")
.appName("histogrammar-pytest")
.config("spark.jars", f"{hist_spark_jar},{hist_jar}")
.config("spark.sql.session.timeZone", "GMT")
.getOrCreate()
)
return spark
@pytest.fixture
def spark_co():
"""
:return: Spark configuration
"""
spark = get_spark()
return spark
# @pytest.mark.spark
@pytest.mark.skipif(not spark_found, reason="spark not found")
@pytest.mark.filterwarnings(
"ignore:createDataFrame attempted Arrow optimization because"
)
def test_get_histograms(spark_co):
pytest.age["data"]["name"] = "'age'"
pytest.company["data"]["name"] = "'company'"
pytest.eyesColor["data"]["name"] = "'eyeColor'"
pytest.gender["data"]["name"] = "'gender'"
pytest.isActive["data"]["name"] = "'isActive'"
pytest.latitude["data"]["name"] = "'latitude'"
pytest.longitude["data"]["name"] = "'longitude'"
pytest.transaction["data"]["name"] = "'transaction'"
pytest.latitude_longitude["data"]["name"] = "'latitude:longitude'"
pytest.latitude_longitude["data"]["bins:name"] = "unit_func"
spark = spark_co
spark_df = spark.createDataFrame(pytest.test_df)
spark_filler = SparkHistogrammar(
features=[
"date",
"isActive",
"age",
"eyeColor",
"gender",
"company",
"latitude",
"longitude",
["isActive", "age"],
["latitude", "longitude"],
"transaction",
"amount",
],
bin_specs={
"transaction": {"num": 100, "low": -2000, "high": 2000},
"longitude": {"bin_width": 5.0, "bin_offset": 0.0},
"latitude": {"bin_width": 5.0, "bin_offset": 0.0},
},
read_key="input",
store_key="output",
)
# test get_histograms() function call
current_hists = spark_filler.get_histograms(spark_df)
# current_hists = make_histograms(spark_df, features, bin_specs)
assert current_hists["age"].toJson() == pytest.age
assert current_hists["company"].toJson() == pytest.company
assert current_hists["eyeColor"].toJson() == pytest.eyesColor
assert current_hists["gender"].toJson() == pytest.gender
assert current_hists["latitude"].toJson() == pytest.latitude
assert current_hists["longitude"].toJson() == pytest.longitude
assert current_hists["transaction"].toJson() == pytest.transaction
# import json
# with open('tests/popmon/hist/resource/transaction.json', 'w') as outfile:
# json.dump(current_hists["transaction"].toJson(), outfile, indent=4)
# @pytest.mark.spark
@pytest.mark.skipif(not spark_found, reason="spark not found")
@pytest.mark.filterwarnings(
"ignore:createDataFrame attempted Arrow optimization because"
)
def test_get_histograms_module(spark_co):
pytest.age["data"]["name"] = "'age'"
pytest.company["data"]["name"] = "'company'"
pytest.eyesColor["data"]["name"] = "'eyeColor'"
pytest.gender["data"]["name"] = "'gender'"
pytest.isActive["data"]["name"] = "'isActive'"
pytest.latitude["data"]["name"] = "'latitude'"
pytest.longitude["data"]["name"] = "'longitude'"
pytest.latitude_longitude["data"]["name"] = "'latitude:longitude'"
pytest.latitude_longitude["data"]["bins:name"] = "unit_func"
spark = spark_co
spark_df = spark.createDataFrame(pytest.test_df)
spark_filler = SparkHistogrammar(
features=[
"date",
"isActive",
"age",
"eyeColor",
"gender",
"company",
"latitude",
"longitude",
["isActive", "age"],
["latitude", "longitude"],
"amount",
],
bin_specs={
"longitude": {"bin_width": 5.0, "bin_offset": 0.0},
"latitude": {"bin_width": 5.0, "bin_offset": 0.0},
},
read_key="input",
store_key="output",
)
# test transform() function call
datastore = spark_filler.transform(datastore={"input": spark_df})
assert "output" in datastore
current_hists = datastore["output"]
assert current_hists["age"].toJson() == pytest.age
assert current_hists["company"].toJson() == pytest.company
assert current_hists["eyeColor"].toJson() == pytest.eyesColor
assert current_hists["gender"].toJson() == pytest.gender
assert current_hists["latitude"].toJson() == pytest.latitude
assert current_hists["longitude"].toJson() == pytest.longitude
# assert current_hists['date'].toJson() == pytest.date
# assert current_hists['isActive'].toJson() == pytest.isActive
# assert current_hists['isActive:age'].toJson() == pytest.isActive_age
# assert current_hists['latitude:longitude'].toJson() == pytest.latitude_longitude
# @pytest.mark.spark
@pytest.mark.skipif(not spark_found, reason="spark not found")
@pytest.mark.filterwarnings(
"ignore:createDataFrame attempted Arrow optimization because"
)
def test_get_histograms_timestamp(spark_co):
from pyspark.sql.functions import to_timestamp
spark = spark_co
data_date = [
"2018-12-10 00:00:00",
"2018-12-10 00:00:00",
"2018-12-10 00:00:00",
"2018-12-10 00:00:00",
"2018-12-10 00:00:00",
"2018-12-17 00:00:00",
"2018-12-17 00:00:00",
"2018-12-17 00:00:00",
"2018-12-17 00:00:00",
"2018-12-19 00:00:00",
]
df = pd.DataFrame(data_date, columns=["dt"])
sdf = spark.createDataFrame(df).withColumn(
"dt", to_timestamp("dt", "yyyy-MM-dd HH:mm:ss")
)
expected = {
"data": {
"binWidth": 2592000000000000.0,
"bins": {"108": 9.0, "109": 1.0},
"bins:type": "Count",
"entries": 10.0,
"name": "'dt'",
"nanflow": 0.0,
"nanflow:type": "Count",
"origin": 1.2625632e18,
},
"type": "SparselyBin",
"version": "1.0",
}
filler = SparkHistogrammar(features=["dt"])
current_hists = filler.get_histograms(sdf)
assert current_hists["dt"].toJson() == expected
# @pytest.mark.spark
@pytest.mark.skipif(not spark_found, reason="spark not found")
@pytest.mark.filterwarnings(
"ignore:createDataFrame attempted Arrow optimization because"
)
def test_get_histograms_date(spark_co):
from pyspark.sql.functions import to_date
spark = spark_co
data_date = [
"2018-12-10",
"2018-12-10",
"2018-12-10",
"2018-12-10",
"2018-12-10",
"2018-12-17",
"2018-12-17",
"2018-12-17",
"2018-12-17",
"2018-12-19",
]
df = pd.DataFrame(data_date, columns=["dt"])
sdf = spark.createDataFrame(df).withColumn("dt", to_date("dt", "yyyy-MM-dd"))
expected = {
"data": {
"binWidth": 2592000000000000.0,
"bins": {"108": 9.0, "109": 1.0},
"bins:type": "Count",
"entries": 10.0,
"name": "'dt'",
"nanflow": 0.0,
"nanflow:type": "Count",
"origin": 1.2625632e18,
},
"type": "SparselyBin",
"version": "1.0",
}
filler = SparkHistogrammar(features=["dt"])
current_hists = filler.get_histograms(sdf)
assert current_hists["dt"].toJson() == expected
# @pytest.mark.spark
@pytest.mark.skipif(not spark_found, reason="spark not found")
@pytest.mark.filterwarnings(
"ignore:createDataFrame attempted Arrow optimization because"
)
def test_null_histograms(spark_co):
spark = spark_co
data = [(None, None, None, None), (1, None, None, 2.), (None, True, "Jones", None), (3, True, "USA", 4.),
(4, False, "FL", 5.)]
columns = ["transaction", "isActive", "eyeColor", "t2"]
sdf = spark.createDataFrame(data=data, schema=columns)
hists = make_histograms(sdf, bin_specs={'transaction': {'num': 40, 'low': 0, 'high': 10}})
assert 'transaction' in hists
assert 'isActive' in hists
assert 'eyeColor' in hists
assert 't2' in hists
h = hists['transaction']
assert h.nanflow.entries == 2
h = hists['t2']
assert h.nanflow.entries == 2
h = hists['isActive']
assert 'NaN' in h.bins
assert h.bins['NaN'].entries == 2
h = hists['eyeColor']
assert 'NaN' in h.bins
assert h.bins['NaN'].entries == 2
| {
"content_hash": "aaf19ce4b4a688758bbddc4de769fbd4",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 109,
"avg_line_length": 31.556701030927837,
"alnum_prop": 0.5915278231514756,
"repo_name": "histogrammar/histogrammar-python",
"id": "6e5e09f07c2580c60c079e54e52a0233aa5f9549",
"size": "9183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_spark_histogrammar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "26341"
},
{
"name": "GAP",
"bytes": "6751"
},
{
"name": "Jupyter Notebook",
"bytes": "49191"
},
{
"name": "Python",
"bytes": "1718940"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
import bpy
import os
import sys
import argparse
from string import Template
from io import open
def main():
# Drop everything before '--'
args = sys.argv[sys.argv.index('--')+1:]
parser = argparse.ArgumentParser(description='Render.')
parser.add_argument('mimetype')
parser.add_argument('subname')
parser.add_argument('destination')
args = parser.parse_args(args)
text = bpy.data.texts[args.subname]
if not os.path.exists(os.path.dirname(args.destination)):
os.makedirs(os.path.dirname(args.destination))
data = text.as_string()
if data.strip() == '':
data = 'application/x-blender.text\n EMPTY'
print(('-'*70))
print(data)
print(('-'*70))
with open(args.destination, 'wb') as file:
file.write(bytes(data, 'UTF-8'))
file.flush()
if __name__ == '__main__':
main()
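# Hedged invocation sketch (paths, text-block name, and output file are placeholders;
# the script is expected to run inside Blender):
#
#     blender scene.blend --background --python b-script-transcoderblendertext.py -- \
#         application/x-blender.text MyTextBlock /tmp/MyTextBlock.txt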
| {
"content_hash": "e9de843b5e79a8311fab61407bae8f4f",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 61,
"avg_line_length": 23.097560975609756,
"alnum_prop": 0.6378035902851109,
"repo_name": "peragro/peragro-at",
"id": "76ee3085042e09f23442235639f2dd03ba1d2ca7",
"size": "947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/damn_at/transcoders/mesh/blender/text/b-script-transcoderblendertext.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "300042"
},
{
"name": "Shell",
"bytes": "3259"
},
{
"name": "Thrift",
"bytes": "1639"
}
],
"symlink_target": ""
} |
"""initial migration
Revision ID: 2daaf886cde0
Revises: 50fabd562efc
Create Date: 2017-03-01 15:26:38.680187
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2daaf886cde0'
down_revision = '50fabd562efc'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('roles', sa.Column('default', sa.Boolean(), nullable=True))
op.add_column('roles', sa.Column('permissions', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('roles', 'permissions')
op.drop_column('roles', 'default')
# ### end Alembic commands ###
| {
"content_hash": "6e1d2cb1794cd4db0f17a132e9f266dc",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 81,
"avg_line_length": 26.033333333333335,
"alnum_prop": 0.6824583866837388,
"repo_name": "TomSGao/blogTom",
"id": "551950721b00e54772b015e36bb357d65455bb4e",
"size": "781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/2daaf886cde0_initial_migration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1854"
},
{
"name": "HTML",
"bytes": "12914"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "42413"
}
],
"symlink_target": ""
} |
import os
import imp
from .plugins.Base import *
from ckstyle.cmdconsole.ConsoleClass import console
from ckstyle.browsers.BinaryRule import ALL
class CssChecker():
    '''CSS checker class; relies on a CSS parser as its helper.'''
def __init__(self, parser, config = None):
self.parser = parser
self.config = config
        # Recorded problems: log is level 2, warn is level 1, error is level 0
self.logMsgs = []
self.warningMsgs = []
self.errorMsgs = []
        # Extra messages, e.g. internal errors of the tool itself
self.extraMsgs = []
        # Registered checkers of different types (all from the plugins directory)
self.ruleSetCheckers = []
self.ruleCheckers = []
self.styleSheetCheckers = []
self.extraCheckers = []
        # If parsing produced any errors, record them first
self.handleParseErrors()
def resetStyleSheet(self):
self.parser.styleSheet.rebase()
def getStyleSheet(self):
        '''Return a reference to the styleSheet.'''
return self.parser.styleSheet
def handleParseErrors(self):
for msg in self.parser.getParseErrors():
self.remember(msg[0], msg[1])
def hasError(self):
        '''Return whether any problem has been recorded.'''
return len(self.logMsgs) != 0 or len(self.warningMsgs) != 0 or len(self.errorMsgs) != 0
def errors(self):
        '''Export the recorded messages.'''
return self.logMsgs, self.warningMsgs, self.errorMsgs
def loadPlugins(self, pluginDir, debug = True):
        '''In debug mode, load plugins from the individual plugin files; otherwise load them from the bundled file.'''
if debug:
self.loadFromSubFiles(pluginDir)
self.loadFromUserPlugins()
else:
self.loadFromBigFile(pluginDir)
def loadFromBigFile(self, pluginDir):
        '''Dynamically load checker classes from AllRules.py.'''
plugin = __import__("ckstyle.plugins.AllRules", fromlist = ['AllRules'])
props = dir(plugin)
for prop in props:
if not prop.startswith('FED'):
continue
pluginClass = getattr(plugin, prop)
self.registerPluginClass(pluginClass)
def loadFromUserPlugins(self):
        '''Load plugins that the user added via "ckstyle install"; they are picked up at run time.'''
include = self.config.include
exclude = self.config.exclude
safeMode = self.config.safeMode
safeModeExcludes = 'combine-same-rulesets'
def import_module(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
root = os.path.realpath(os.path.join(__file__, '../userplugins/plugins'))
modulePath = self.getModulePath(root)
pluginClassName = 'PluginClass'
for filename in os.listdir(root):
if filename.startswith('.') or filename.startswith('_'):
continue
if not os.path.isdir(os.path.realpath(os.path.join(root, filename))):
continue
plugin = None
try:
plugin = import_module(modulePath + filename + '.index')
except Exception as e:
console.showError('Orz... can not load %s' % modulePath + filename + '.index')
continue
pluginClass = None
if hasattr(plugin, pluginClassName):
pluginClass = getattr(plugin, pluginClassName)
else:
console.showError('class %s should exist in %s.py' % (pluginClassName, filename + '/index'))
continue
self.registerPluginClass(pluginClass)
def loadFromSubFiles(self, pluginDir):
        '''Dynamically load checker classes from the plugins directory.'''
modulePath = self.getModulePath(pluginDir)
for filename in os.listdir(pluginDir):
if not filename.endswith('.py') or filename.startswith('_'):
continue
if filename == 'Base.py' or filename == 'helper.py':
continue
pluginName = os.path.splitext(filename)[0]
            # Get a reference to the plugin module
plugin = __import__(modulePath + pluginName, fromlist = [pluginName])
pluginClass = None
if hasattr(plugin, pluginName):
pluginClass = getattr(plugin, pluginName)
else:
console.error('[TOOL] class %s should exist in %s.py' % (pluginName, pluginName))
continue
self.registerPluginClass(pluginClass)
def registerPluginClass(self, pluginClass):
include = self.config.include
exclude = self.config.exclude
safeMode = self.config.safeMode
safeModeExcludes = 'combine-same-rulesets'
        # Instantiate the plugin class
instance = pluginClass()
        # A "private" plugin is a rule that is applied regardless of the include/exclude selection
if not hasattr(instance, 'private') or getattr(instance, 'private') is not True:
if include != 'all' and include.find(instance.id) == -1:
return
elif exclude != 'none' and exclude.find(instance.id) != -1:
return
elif safeMode and safeModeExcludes.find(instance.id) != -1:
return
self.registerChecker(instance)
def getModulePath(self, pluginDir):
transDir = pluginDir.replace('\\', '/')
splited = transDir.split('/ckstyle/')[1]
modulePath = 'ckstyle.' + splited.replace('/', '.') + '.'
return modulePath
def registerChecker(self, checker):
        '''Register the checker in the list that matches its type.'''
if isinstance(checker, RuleChecker):
self.registerRuleChecker(checker)
elif isinstance(checker, RuleSetChecker):
self.registerRuleSetChecker(checker)
elif isinstance(checker, StyleSheetChecker):
self.registerStyleSheetChecker(checker)
else:
self.registerExtraChecker(checker)
def registerStyleSheetChecker(self, checker):
self.styleSheetCheckers.append(checker)
def registerRuleSetChecker(self, checker):
self.ruleSetCheckers.append(checker)
def registerRuleChecker(self, checker):
self.ruleCheckers.append(checker)
def registerExtraChecker(self, checker):
self.extraCheckers.append(checker)
def remember(self, errorLevel, errorMsg):
        '''Record a problem found in the code.'''
if errorLevel == ERROR_LEVEL.LOG:
if self.config.errorLevel > 1:
self.logMsgs.append(errorMsg)
elif errorLevel == ERROR_LEVEL.WARNING:
if self.config.errorLevel > 0:
self.warningMsgs.append(errorMsg)
elif errorLevel == ERROR_LEVEL.ERROR:
self.errorMsgs.append(errorMsg)
else:
console.error('[TOOL] wrong ErrorLevel for ' + errorMsg)
def logStyleSheetMessage(self, checker, styleSheet, errors = None):
        '''Record a StyleSheet-level problem.'''
errorLevel = checker.getLevel()
if errors is None:
errors = [checker.getMsg()]
for errorMsg in errors:
obj = {}
if errorMsg is None or errorMsg == '':
console.error('[TOOL] no errorMsg in your plugin, please check it')
#if errorMsg.find('${file}') == -1:
# errorMsg = errorMsg + ' (from "' + styleSheet.getFile() + '")'
#else:
# errorMsg = errorMsg.replace('${file}', styleSheet.getFile())
obj["errorMsg"] = errorMsg
obj["file"] = styleSheet.getFile()
obj["level"] = 'stylesheet'
self.remember(errorLevel, obj);
def logRuleMessage(self, checker, rule, errors = None):
        '''Record a problem with a single key/value rule.'''
errorLevel = checker.getLevel()
if errors is None:
errors = [checker.getMsg()]
for errorMsg in errors:
obj = {}
if errorMsg is None or errorMsg == '':
console.error('[TOOL] no errorMsg in your plugin, please check it')
#if errorMsg.find('${selector}') == -1:
# errorMsg = errorMsg + ' (from "' + rule.selector + '")'
#else:
# errorMsg = errorMsg.replace('${selector}', rule.selector)
#errorMsg = errorMsg.replace('${name}', rule.roughName.strip())
#errorMsg = errorMsg.replace('${value}', rule.value.strip())
obj["errorMsg"] = errorMsg
obj["selector"] = rule.selector
obj["name"] = rule.roughName.strip()
obj["value"] = rule.value.strip()
obj["level"] = 'rule'
self.remember(errorLevel, obj);
def logRuleSetMessage(self, checker, ruleSet, errors = None):
        '''Record a problem found in a rule set.'''
errorLevel = checker.getLevel()
if errors is None:
errors = [checker.getMsg()]
for errorMsg in errors:
obj = {}
#if errorMsg.find('${selector}') == -1:
# errorMsg = errorMsg + ' (from "' + ruleSet.selector + '")'
#else:
# errorMsg = errorMsg.replace('${selector}', ruleSet.selector)
obj["errorMsg"] = errorMsg
obj["selector"] = ruleSet.selector
obj["level"] = 'ruleset'
self.remember(errorLevel, obj);
def doCompress(self, browser = ALL):
self.config._curBrowser = browser
self.doFix()
return self.getStyleSheet().compress(browser).strip()
def doFix(self, browser = ALL):
#self.resetStyleSheet()
        # Rule sets to ignore (currently only the unit-test selectors)
ignoreRuleSets = self.config.ignoreRuleSets
def findInArray(array, value):
return value in array or value.strip() in array
        # Fix a rule set
def fixRuleSet(ruleSet):
for checker in self.ruleSetCheckers:
if not hasattr(checker, 'fix'):
continue
if ruleSet.fixedSelector == '':
ruleSet.fixedSelector = ruleSet.selector
ruleSet.fixedComment = ruleSet.comment
checker.fix(ruleSet, self.config)
        # Fix the individual rules
def fixRules(ruleSet):
for checker in self.ruleCheckers:
for rule in ruleSet.getRules():
if not hasattr(checker, 'fix'):
continue
                    # Make sure fixedName/fixedValue always have a value
                    # fix() must test against fixedName/fixedValue so that fixes from other plugins are not overwritten
if rule.fixedValue == '':
rule.fixedValue = rule.value
rule.fixedName = rule.strippedName
#print checker.id, checker, rule.fixedValue
checker.fix(rule, self.config)
def fixExtraRules(ruleSet):
for checker in self.extraCheckers:
if not hasattr(checker, 'fix'):
continue
if ruleSet.fixedSelector == '':
ruleSet.fixedSelector = ruleSet.selector
ruleSet.fixedStatement = ruleSet.statement
checker.fix(ruleSet, self.config)
styleSheet = self.parser.styleSheet
for ruleSet in styleSheet.getRuleSets():
if ruleSet.extra:
fixExtraRules(ruleSet)
continue
            # Skip this rule set if it should be ignored
if findInArray(ignoreRuleSets, ruleSet.selector):
continue
            # Fix the rules first
fixRules(ruleSet)
            # Then fix the rule set
fixRuleSet(ruleSet)
        # Finally fix the style sheet
for checker in self.styleSheetCheckers:
if hasattr(checker, 'fix'):
checker.fix(styleSheet, self.config)
return self.getStyleSheet().fixed(self.config)
def doCheck(self):
        # Rule sets to ignore (currently only the unit-test selectors)
ignoreRuleSets = self.config.ignoreRuleSets
def findInArray(array, value):
return value in array or value.strip() in array
def isBoolean(value):
return type(value) == type(True)
def isList(value):
return isinstance(value, list)
        # Check a rule set
def checkRuleSet(ruleSet):
for checker in self.ruleSetCheckers:
if not hasattr(checker, 'check'):
continue
result = checker.check(ruleSet, self.config)
if isBoolean(result):
if not result:
self.logRuleSetMessage(checker, ruleSet)
elif isList(result) and len(result) != 0:
self.logRuleSetMessage(checker, ruleSet, result)
else:
console.error('check should be boolean/list, %s is not.' % checker.id)
        # Check the individual rules
def checkRule(ruleSet):
for checker in self.ruleCheckers:
for rule in ruleSet.getRules():
if not hasattr(checker, 'check'):
continue
result = checker.check(rule, self.config)
if isBoolean(result):
if not result:
self.logRuleMessage(checker, rule)
elif isList(result) and len(result) != 0:
self.logRuleMessage(checker, rule, result)
else:
console.error('check should be boolean/list, %s is not.' % checker.id)
        # Check the extra rules
def checkExtraRule(ruleSet):
for checker in self.extraCheckers:
if not hasattr(checker, 'check'):
continue
result = checker.check(ruleSet, self.config)
if isBoolean(result):
if not result:
self.logRuleSetMessage(checker, ruleSet)
elif isList(result) and len(result) != 0:
self.logRuleSetMessage(checker, ruleSet, result)
else:
console.error('check should be boolean/list, %s is not.' % checker.id)
        # Check the style sheet
styleSheet = self.parser.styleSheet
for checker in self.styleSheetCheckers:
if not hasattr(checker, 'check'):
continue
result = checker.check(styleSheet, self.config)
if isBoolean(result):
if not result:
self.logStyleSheetMessage(checker, styleSheet)
elif isList(result) and len(result) != 0:
self.logStyleSheetMessage(checker, styleSheet, result)
else:
console.error('check should be boolean/list, %s is not.' % checker.id)
for ruleSet in styleSheet.getRuleSets():
if ruleSet.extra:
checkExtraRule(ruleSet)
continue
            # Skip this rule set if it should be ignored
if findInArray(ignoreRuleSets, ruleSet.selector):
continue
checkRuleSet(ruleSet)
checkRule(ruleSet)
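# Hedged usage sketch (illustrative only; the parser, config and plugin directory come
# from the rest of the ckstyle package):
#
#     checker = CssChecker(parser, config)
#     checker.loadPlugins(pluginDir)
#     checker.doCheck()
#     logs, warnings, errors = checker.errors()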
| {
"content_hash": "0652b339eb0842d3b30eaccbb9825e1a",
"timestamp": "",
"source": "github",
"line_count": 390,
"max_line_length": 108,
"avg_line_length": 37.60769230769231,
"alnum_prop": 0.5576464171268835,
"repo_name": "wangjeaf/CSSCheckStyle",
"id": "e0dfe64b9fcbcb27d361eed9bb4d3c3c766446bf",
"size": "15419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ckstyle/CssCheckerWrapper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "373226"
},
{
"name": "Shell",
"bytes": "928"
},
{
"name": "VimL",
"bytes": "1871"
}
],
"symlink_target": ""
} |
import os
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
BASE_DIR = PACKAGE_ROOT
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "dev.db",
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "UTC"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = int(os.environ.get("SITE_ID", 1))
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PACKAGE_ROOT, "site_media", "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = "/site_media/media/"
# Absolute path to the directory static files should be collected to.
# Don"t put anything in this directory yourself; store your static files
# in apps" "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PACKAGE_ROOT, "site_media", "static")
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/site_media/static/"
# Additional locations of static files
STATICFILES_DIRS = [
os.path.join(PACKAGE_ROOT, "static"),
]
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# Make this unique, and don"t share it with anybody.
SECRET_KEY = "i^1kykkqv4^i1!bl%aak-pjip!)j9wc00)9+!sw-v#t(_)jo_u"
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"pinax_theme_bootstrap.context_processors.theme",
"pinaxbootcamp.context_processors.settings",
"account.context_processors.account",
]
MIDDLEWARE_CLASSES = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"account.middleware.LocaleMiddleware",
"account.middleware.TimezoneMiddleware",
]
ROOT_URLCONF = "pinaxbootcamp.urls"
# Python dotted path to the WSGI application used by Django"s runserver.
WSGI_APPLICATION = "pinaxbootcamp.wsgi.application"
TEMPLATE_DIRS = [
os.path.join(PACKAGE_ROOT, "templates"),
]
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.messages",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.staticfiles",
# theme
"bootstrapform",
"pinax_theme_bootstrap",
# external
"pinax.blog",
"account",
#analytics
"metron",
# project
"pinaxbootcamp",
]
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {
"require_debug_false": {
"()": "django.utils.log.RequireDebugFalse"
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler"
}
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
}
}
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, "fixtures"),
]
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
METRON_SETTINGS = {
"mixpanel": {
1: "b18204ebc6c21bb99af73417e7081e22",
},
"google": {
1: "UA-62140011-1",
},
}
| {
"content_hash": "c833c33d8e707e0e45899facd00d3b82",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 82,
"avg_line_length": 29.81081081081081,
"alnum_prop": 0.6951949229374433,
"repo_name": "sargentstudley/pinaxbootcamp",
"id": "5561a480853cd7b435676bd5df42bfa931063738",
"size": "5515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pinaxbootcamp/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "30419"
},
{
"name": "HTML",
"bytes": "5538"
},
{
"name": "Makefile",
"bytes": "1404"
},
{
"name": "Python",
"bytes": "9153"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'DailyStatus'
db.create_table('player_mgr_dailystatus', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('daily_visitors', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('player_mgr', ['DailyStatus'])
def backwards(self, orm):
# Deleting model 'DailyStatus'
db.delete_table('player_mgr_dailystatus')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 6, 25, 17, 12, 43, 14393)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 6, 25, 17, 12, 43, 14256)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'player_mgr.dailystatus': {
'Meta': {'object_name': 'DailyStatus'},
'daily_visitors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'player_mgr.profile': {
'Meta': {'object_name': 'Profile'},
'completion_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'contact_carrier': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'contact_text': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'daily_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'last_visit_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'referrer_awarded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'referring_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'referred_profiles'", 'null': 'True', 'to': "orm['auth.User']"}),
'setup_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'setup_profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['team_mgr.Team']", 'null': 'True', 'blank': 'True'}),
'theme': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'team_mgr.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'})
},
'team_mgr.team': {
'Meta': {'object_name': 'Team'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['team_mgr.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'})
}
}
complete_apps = ['player_mgr']
| {
"content_hash": "125ad9298d5661167d4a48443ab24a5b",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 182,
"avg_line_length": 71.95192307692308,
"alnum_prop": 0.5489776827475611,
"repo_name": "KendyllD/boukenda-project",
"id": "2c9253eb2db2322f69ca803115d7bace6c97eb7b",
"size": "7501",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "makahiki/apps/managers/player_mgr/migrations/0002_auto__add_dailystatus.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import functools
import socket
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.http import HttpResponseForbidden
from djerlyvideo.models import Server
from djerlyvideo.utils import get_remote_address
__SERVER_IPS = []


def test_access(f):
    """Allow the wrapped view to be called only from hosts listed as active ``Server`` records."""
    @functools.wraps(f)
    def wrapper(request, *args, **kw):
        global __SERVER_IPS
        remote_addr = get_remote_address(request)
        # Resolve the active servers' IPs once and cache them at module level;
        # server_post_save() below clears the cache whenever a Server is saved,
        # so the list is re-resolved on the next request instead of growing on every call.
        if not __SERVER_IPS:
            servers = Server.objects.active().all()
            for server in servers:
                ip = socket.gethostbyname(server.host)
                __SERVER_IPS.append(ip)
        if remote_addr in __SERVER_IPS:
            return f(request, *args, **kw)
        return HttpResponseForbidden()
    return wrapper


@receiver(post_save, sender=Server)
def server_post_save(**kwargs):
    # Invalidate the cached IP list so the next request re-resolves the hosts.
    global __SERVER_IPS
    __SERVER_IPS = []
| {
"content_hash": "1d17e781a0eed0dd4b0f13ced1422acf",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 50,
"avg_line_length": 24.485714285714284,
"alnum_prop": 0.6721120186697783,
"repo_name": "plazix/django-erlyvideo",
"id": "4fae9a3f7b97aa6593f1f726ec0feabbbb7c2bef",
"size": "882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djerlyvideo/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "23177"
}
],
"symlink_target": ""
} |
from __future__ import print_function, absolute_import
__author__ = 'katharine'

from six import with_metaclass, iteritems
from binascii import hexlify
import collections
import logging
import struct

from libpebble2.exceptions import IncompleteMessage
from .types import Field, DEFAULT_ENDIANNESS

__all__ = ["PebblePacket"]

logger = logging.getLogger("libpebble2.protocol")

_PacketRegistry = {}


def make_output(thing):
    class C(object):
        def __repr__(self):
            return thing
    return C()


class PacketType(type):
    """
    Metaclass for :class:`PebblePacket` that transforms properties that are subclasses of :class:`Field` into a
    Pebble Protocol parser.
    """
    def __new__(mcs, name, bases, dct):
        mapping = []
        # If we have a _Meta property, delete it.
        if '_Meta' in dct:
            del dct['_Meta']
        # If we have a Meta property, move it to _Meta. This effectively prevents it being inherited.
        if 'Meta' in dct:
            dct['_Meta'] = dct['Meta'].__dict__
            del dct['Meta']
        # For each Field, add it to our mapping, then set the exposed value to its default value.
        # We go through the classes we inherited from to add anything in there.
        # This means that inheritance works, with inherited classes appending their fields to the end.
        dct['_type_mapping'] = collections.OrderedDict()
        for base in bases:
            if hasattr(base, '_type_mapping'):
                dct['_type_mapping'].update(getattr(base, '_type_mapping'))
        for k, v in iteritems(dct):
            if not isinstance(v, Field):
                continue
            v._name = k
            mapping.append((k, v))
            dct[k] = v._default
        # Put the results into an ordered dict. We sort on field_id to ensure that our dict ends up
        # in the correct order.
        dct['_type_mapping'].update(collections.OrderedDict(sorted(mapping, key=lambda x: x[1].field_id)))
        return super(PacketType, mcs).__new__(mcs, name, bases, dct)

    def __init__(cls, name, bases, dct):
        # At this point we actually have a reference to the class, so we can register it
        # in our packet type registry for later decoding.
        if hasattr(cls, '_Meta'):
            if 'endpoint' in cls._Meta and cls._Meta.get('register', True):
                _PacketRegistry[cls._Meta['endpoint']] = cls
        # Fill in all of the fields with a reference to this class.
        # TODO: This isn't used any more; remove it?
        for k, v in iteritems(cls._type_mapping):
            v._parent = cls
        super(PacketType, cls).__init__(name, bases, dct)

    def __repr__(self):
        return self.__name__


class PebblePacket(with_metaclass(PacketType)):
    """
    Represents some sort of Pebble Protocol message.

    A PebblePacket can have an inner class named ``Meta`` containing some information about the property:

    ================== ===============================================================================================
    **endpoint**       The Pebble Protocol endpoint that is represented by this message.
    **endianness**     The endianness of the packet. The default endianness is big-endian, but it can be overridden by
                       packets and fields, with the priority: field endianness first, then the packet's ``Meta``
                       endianness, then the default passed to :meth:`serialise` or :meth:`parse`.
    **register**       If set to ``False``, the packet will not be registered and thus will be ignored by
                       :meth:`parse_message`. This is useful when messages are ambiguous, and distinguished only by
                       whether they are sent to or from the Pebble.
    ================== ===============================================================================================

    A sample packet might look like this: ::

        class AppFetchResponse(PebblePacket):
            class Meta:
                endpoint = 0x1771
                endianness = '<'
                register = False

            command = Uint8(default=0x01)
            response = Uint8(enum=AppFetchStatus)
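
    Constructing and serialising such a packet might then look roughly like this (a sketch only;
    ``AppFetchStatus.START`` is an assumed enum member, not something defined in this module): ::

        packet = AppFetchResponse(response=AppFetchStatus.START)
        framed = packet.serialise_packet()   # 4-byte length/endpoint header followed by the payload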

    :param \*\*kwargs: Initial values for any properties on the object.
    """
    def __init__(self, **kwargs):
        for k, v in iteritems(kwargs):
            if k.startswith('_'):
                raise AttributeError("You cannot set internal properties during construction.")
            getattr(self, k)  # Throws an exception if the property doesn't exist.
            setattr(self, k, v)

    def serialise(self, default_endianness=None):
        """
        Serialise a message, without including any framing.

        :param default_endianness: The default endianness, unless overridden by the fields or class metadata.
                                   Should usually be left at ``None``. Otherwise, use ``'<'`` for little endian and
                                   ``'>'`` for big endian.
        :type default_endianness: str
        :return: The serialised message.
        :rtype: bytes
        """
        # Figure out an endianness.
        endianness = (default_endianness or DEFAULT_ENDIANNESS)
        if hasattr(self, '_Meta'):
            endianness = self._Meta.get('endianness', endianness)
        inferred_fields = set()
        for k, v in iteritems(self._type_mapping):
            inferred_fields |= {x._name for x in v.dependent_fields()}
        for field in inferred_fields:
            setattr(self, field, None)
        # Some fields want to manipulate other fields that appear before them (e.g. Unions)
        for k, v in iteritems(self._type_mapping):
            v.prepare(self, getattr(self, k))
        message = b''
        for k, v in iteritems(self._type_mapping):
            message += v.value_to_bytes(self, getattr(self, k), default_endianness=endianness)
        return message

    def serialise_packet(self):
        """
        Serialise a message, including framing information inferred from the ``Meta`` inner class of the packet.

        ``self.Meta.endpoint`` must be defined to call this method.
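
        The frame is simply the payload length and the endpoint, both packed as big-endian 16-bit integers,
        followed by the serialised payload; schematically (a sketch of the layout, not an extra API): ::

            struct.pack('!HH', len(payload), endpoint) + payload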

        :return: A serialised message, ready to be sent to the Pebble.
        """
        if not hasattr(self, '_Meta'):
            raise ReferenceError("Can't serialise a packet that doesn't have an endpoint ID.")
        serialised = self.serialise()
        return struct.pack('!HH', len(serialised), self._Meta['endpoint']) + serialised

    @classmethod
    def parse_message(cls, message):
        """
        Parses a message received from the Pebble. Uses Pebble Protocol framing to figure out what sort of packet
        it is. If the packet is registered (has been defined and imported), returns the deserialised packet, which will
        not necessarily be the same class as this. Otherwise returns ``None``.

        Also returns the length of the message consumed during deserialisation.
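
        A consumer loop over bytes received from the transport might look roughly like this (a sketch;
        ``buffer`` and ``handle`` are assumed names, not part of this module): ::

            while buffer:
                try:
                    packet, consumed = PebblePacket.parse_message(buffer)
                except IncompleteMessage:
                    break
                buffer = buffer[consumed:]
                if packet is not None:
                    handle(packet)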

        :param message: A serialised message received from the Pebble.
        :type message: bytes
        :return: ``(decoded_message, decoded length)``
        :rtype: (:class:`PebblePacket`, :any:`int`)
        """
        length = struct.unpack_from('!H', message, 0)[0] + 4
        if len(message) < length:
            raise IncompleteMessage()
        command, = struct.unpack_from('!H', message, 2)
        if command in _PacketRegistry:
            return _PacketRegistry[command].parse(message[4:length])[0], length
        else:
            return None, length

    @classmethod
    def parse(cls, message, default_endianness=DEFAULT_ENDIANNESS):
        """
        Parses a message without any framing, returning the decoded result and length of message consumed. The result
        will always be of the same class as :meth:`parse` was called on. If the message is invalid,
        :exc:`.PacketDecodeError` will be raised.
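
        For example, decoding an unframed body with the ``AppFetchResponse`` sample shown above might look like
        this (a sketch; ``payload`` is assumed to hold just the message body, without the length/endpoint header): ::

            response, consumed = AppFetchResponse.parse(payload)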

        :param message: The message to decode.
        :type message: bytes
        :param default_endianness: The default endianness, unless overridden by the fields or class metadata.
                                   Should usually be left at the default. Otherwise, use ``'<'`` for little endian
                                   and ``'>'`` for big endian.
        :return: ``(decoded_message, decoded length)``
        :rtype: (:class:`PebblePacket`, :any:`int`)
        """
        obj = cls()
        offset = 0
        if hasattr(cls, '_Meta'):
            default_endianness = cls._Meta.get('endianness', default_endianness)
        for k, v in iteritems(cls._type_mapping):
            try:
                value, length = v.buffer_to_value(obj, message, offset, default_endianness=default_endianness)
            except Exception:
                logger.warning("Exception decoding {}.{}".format(cls.__name__, k))
                raise
            offset += length
            setattr(obj, k, value)
        return obj, offset

    def __repr__(self):
        return "%s(%s)" % (type(self).__name__,
                           ', '.join('%s=%s' % (k, self._format_repr(getattr(self, k))) for k in self._type_mapping.keys()))

    def __eq__(self, other):
        if not isinstance(other, PebblePacket):
            return NotImplemented
        if type(self) != type(other):
            return False
        for k in self._type_mapping:
            if getattr(self, k) != getattr(other, k):
                return False
        return True

    def __ne__(self, other):
        return not (self == other)

    def _format_repr(self, value):
        if isinstance(value, bytes):
            if len(value) < 20:
                return hexlify(value).decode()
            else:
                return hexlify(value[:17]).decode() + '...'
        else:
            return value
| {
"content_hash": "df68d7a882fd49824a5b48afcd66b002",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 124,
"avg_line_length": 41.256410256410255,
"alnum_prop": 0.5830743733167599,
"repo_name": "pebble/libpebble2",
"id": "9d2c04d0a9827cda6d4c680d821afbc8640ef9fd",
"size": "9654",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libpebble2/protocol/base/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "212253"
}
],
"symlink_target": ""
} |