text (string, lengths 4–1.02M) | meta (dict) |
---|---|
from pattern.graph import Graph, Node, Edge
import json
class NoPathException(Exception):
pass
class DependencyParse(object):
def __init__(self, parse, enhanced=True):
self.parse = parse
self.g, self.edge, self.node, self.root = DependencyParse.make_graph(self.parse, enhanced)
@classmethod
def make_graph(cls, parse, enhanced=True):
edge_map, node_map = {}, {}
g = Graph()
root = None
for child, parent, arc in parse:
if arc == 'root':
root = child
if not enhanced:
arc = arc.split(':')[0]
if child not in node_map:
node_map[child] = Node(child)
child = node_map[child]
if parent not in node_map:
node_map[parent] = Node(parent)
parent = node_map[parent]
if parent.id != child.id:
g.add_edge(parent, child, type=arc)
return g, edge_map, node_map, root
@classmethod
def parent_of(cls, node):
parents = [e.node1 for e in node.edges if e.node2 == node]
return parents[0] if len(parents) else None
@classmethod
def get_head(cls, ent_tail, ent_start, ent_end):
seen = set()
while True:
parent = cls.parent_of(ent_tail)
if parent in seen:
raise Exception("found cycle!")
if parent is None or parent.id >= ent_end or parent.id < ent_start:
break
seen.add(parent)
ent_tail = parent
return ent_tail
@classmethod
def get_edge(cls, node1, node2):
edges = []
for edge in node1.edges:
if edge.node1 == node2:
edges.append(edge.type + '_from')
elif edge.node2 == node2:
edges.append(edge.type + '_to')
return edges
def get_path(self, node1, node2, g):
path = g.shortest_path(node1, node2, directed=False)
if path is None:
raise NoPathException("cannot find path between entities!")
curr = node1
edges = []
for node in path[1:]:
if curr.id == self.root:
edges.append([curr.id, None, 'root'])
edge = self.get_edge(curr, node)[0]
edges.append([curr.id, node.id, edge])
curr = node
return edges
def get_path_from_parse(self, subject_start, subject_end, object_start, object_end):
subject = self.node[subject_end-1], subject_start, subject_end
object = self.node[object_end-1], object_start, object_end
return self.get_path(
DependencyParse.get_head(*object),
DependencyParse.get_head(*subject),
self.g
)
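# --- Illustrative usage (editor's addition, not part of the original module) ---
# A minimal sketch, assuming `parse` is a list of (child_index, parent_index,
# arc_label) triples as consumed by make_graph() above; the three-token parse
# and the span boundaries below are made up purely for illustration.
if __name__ == '__main__':
    example_parse = [(0, 1, 'nsubj'), (1, 1, 'root'), (2, 1, 'dobj')]
    dep = DependencyParse(example_parse, enhanced=False)
    try:
        # dependency path from the head of the object span [2, 3) to the head
        # of the subject span [0, 1)
        path = dep.get_path_from_parse(0, 1, 2, 3)
    except NoPathException:
        path = None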
| {
"content_hash": "ab9e2e1498ea4724e373e5ba1f1889ca",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 98,
"avg_line_length": 32.6,
"alnum_prop": 0.5438469866474197,
"repo_name": "vzhong/sent2rel",
"id": "334efcca93a4459a20d12a0954dcdd00ab3ec8c8",
"size": "2771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/dependency.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "545018"
},
{
"name": "Python",
"bytes": "74526"
},
{
"name": "Shell",
"bytes": "634"
}
],
"symlink_target": ""
} |
from otp.ai.AIBase import *
from toontown.toonbase import ToontownGlobals
from direct.distributed.ClockDelta import *
from toontown.building.ElevatorConstants import *
from toontown.building import DistributedElevatorFloorAI
from toontown.building import DistributedElevatorAI
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.task import Task
class DistributedLawOfficeElevatorIntAI(DistributedElevatorFloorAI.DistributedElevatorFloorAI):
def __init__(self, air, lawOfficeId, bldg, avIds):
DistributedElevatorFloorAI.DistributedElevatorFloorAI.__init__(self, air, bldg, avIds)
self.lawOfficeId = lawOfficeId
def getEntranceId(self):
return self.entranceId
def elevatorClosed(self):
numPlayers = self.countFullSeats()
if numPlayers > 0:
players = []
for i in self.seats:
if i not in [None, 0]:
players.append(i)
sittingAvIds = []
for seatIndex in range(len(self.seats)):
avId = self.seats[seatIndex]
if avId:
sittingAvIds.append(avId)
for avId in self.avIds:
if avId not in sittingAvIds:
print 'THIS AV ID %s IS NOT ON BOARD' % avId
self.bldg.startNextFloor()
else:
self.notify.warning('The elevator left, but was empty.')
self.fsm.request('closed')
return
def enterClosed(self):
print 'DistributedLawOfficeElevatorIntAI.elevatorClosed %s' % self.doId
DistributedElevatorFloorAI.DistributedElevatorFloorAI.enterClosed(self)
if not self.hasOpenedLocked or not self.isLocked:
self.fsm.request('opening')
if self.isLocked:
self.hasOpenedLocked = 1
| {
"content_hash": "6eec67d9ace542b754ba3904109b5944",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 95,
"avg_line_length": 36.6,
"alnum_prop": 0.6513661202185792,
"repo_name": "ksmit799/Toontown-Source",
"id": "2ebe47dcca64a1f1601c1e24944886f56a1a4070",
"size": "1830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/coghq/DistributedLawOfficeElevatorIntAI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1441"
},
{
"name": "PLSQL",
"bytes": "901"
},
{
"name": "Python",
"bytes": "15617225"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
} |
"""The most basic test: check that the marker works.
"""
import pytest
def test_marker_registered(ctestdir):
result = ctestdir.runpytest("--markers")
result.stdout.fnmatch_lines("""
@pytest.mark.dependency*
""")
def test_marker(ctestdir):
ctestdir.makepyfile("""
import pytest
from pytest_dependency import DependencyManager
@pytest.mark.dependency()
def test_marker(request):
node = request.node.getparent(pytest.Module)
assert hasattr(node, 'dependencyManager')
assert isinstance(node.dependencyManager, DependencyManager)
assert 'test_marker' in node.dependencyManager.results
""")
result = ctestdir.runpytest("--verbose")
result.assert_outcomes(passed=1)
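# --- Illustrative usage (editor's addition, not part of the original test module) ---
# A minimal sketch of how the marker checked above is normally used; the test
# names are made up for the example.
@pytest.mark.dependency()
def test_example_parent():
    assert True
@pytest.mark.dependency(depends=["test_example_parent"])
def test_example_child():
    # pytest-dependency skips this test automatically if test_example_parent
    # did not pass
    assert True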
| {
"content_hash": "53c8943978f937209e05f0fe62347a3f",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 72,
"avg_line_length": 28.925925925925927,
"alnum_prop": 0.6619718309859155,
"repo_name": "RKrahl/pytest-dependency",
"id": "7be11f1829c1f7245175a239594185585dabd572",
"size": "781",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_01_marker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "577"
},
{
"name": "Python",
"bytes": "61407"
}
],
"symlink_target": ""
} |
""" Setup file """
import os
from setuptools import setup
HERE = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(HERE, 'README.markdown')).read()
REQUIREMENTS = [
]
TEST_REQUIREMENTS = [
'tox',
'pytest',
'pytest-cov',
'coverage',
'flake8'
]
if __name__ == "__main__":
setup(
name='simplex',
version='0.1.2',
description="simple subset of regex",
long_description=README,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries',
],
author='Joe Cross',
author_email='[email protected]',
url='https://github.com/numberoverzero/simplex',
license='MIT',
keywords='re regex regexp',
platforms='any',
include_package_data=True,
py_modules=['simplex'],
install_requires=REQUIREMENTS,
tests_require=REQUIREMENTS + TEST_REQUIREMENTS,
)
| {
"content_hash": "5c4e2bc9d8b54e2a64b7828a2f59d539",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 59,
"avg_line_length": 28.31111111111111,
"alnum_prop": 0.565149136577708,
"repo_name": "numberoverzero/simplex",
"id": "51a89e1ccc2299b05c153bbd17838fb9bead626e",
"size": "1274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4686"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str,
workspace_name: str,
subscription_id: str,
*,
skip_token: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-09-01-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/watchlists",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, "str", max_length=90, min_length=1),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
if skip_token is not None:
_params["$skipToken"] = _SERIALIZER.query("skip_token", skip_token, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str, workspace_name: str, watchlist_alias: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-09-01-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/watchlists/{watchlistAlias}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, "str", max_length=90, min_length=1),
"watchlistAlias": _SERIALIZER.url("watchlist_alias", watchlist_alias, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str, workspace_name: str, watchlist_alias: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-09-01-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/watchlists/{watchlistAlias}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, "str", max_length=90, min_length=1),
"watchlistAlias": _SERIALIZER.url("watchlist_alias", watchlist_alias, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
resource_group_name: str, workspace_name: str, watchlist_alias: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-09-01-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/watchlists/{watchlistAlias}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, "str", max_length=90, min_length=1),
"watchlistAlias": _SERIALIZER.url("watchlist_alias", watchlist_alias, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
class WatchlistsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.securityinsight.SecurityInsights`'s
:attr:`watchlists` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self, resource_group_name: str, workspace_name: str, skip_token: Optional[str] = None, **kwargs: Any
) -> Iterable["_models.Watchlist"]:
"""Gets all watchlists, without watchlist items.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:param skip_token: Skiptoken is only used if a previous operation returned a partial result. If
a previous response contains a nextLink element, the value of the nextLink element will include
a skiptoken parameter that specifies a starting point to use for subsequent calls. Optional.
Default value is None.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Watchlist or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.securityinsight.models.Watchlist]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.WatchlistList]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
subscription_id=self._config.subscription_id,
skip_token=skip_token,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("WatchlistList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/watchlists"} # type: ignore
@distributed_trace
def get(
self, resource_group_name: str, workspace_name: str, watchlist_alias: str, **kwargs: Any
) -> _models.Watchlist:
"""Gets a watchlist, without its watchlist items.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:param watchlist_alias: Watchlist Alias. Required.
:type watchlist_alias: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Watchlist or the result of cls(response)
:rtype: ~azure.mgmt.securityinsight.models.Watchlist
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.Watchlist]
request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
watchlist_alias=watchlist_alias,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("Watchlist", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/watchlists/{watchlistAlias}"} # type: ignore
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, workspace_name: str, watchlist_alias: str, **kwargs: Any
) -> None:
"""Delete a watchlist.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:param watchlist_alias: Watchlist Alias. Required.
:type watchlist_alias: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
watchlist_alias=watchlist_alias,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers["Azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("Azure-AsyncOperation")
)
if cls:
return cls(pipeline_response, None, response_headers)
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/watchlists/{watchlistAlias}"} # type: ignore
@overload
def create_or_update(
self,
resource_group_name: str,
workspace_name: str,
watchlist_alias: str,
watchlist: _models.Watchlist,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.Watchlist:
"""Create or update a Watchlist and its Watchlist Items (bulk creation, e.g. through text/csv
content type). To create a Watchlist and its Items, we should call this endpoint with either
        rawContent or a valid SAS URI and contentType properties. The rawContent is mainly used for
        small watchlists (content size below 3.8 MB). The SAS URI enables the creation of large
        watchlists, where the content size can go up to 500 MB. The status of processing such a
        large file can be polled through the URL returned in the Azure-AsyncOperation header.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:param watchlist_alias: Watchlist Alias. Required.
:type watchlist_alias: str
:param watchlist: The watchlist. Required.
:type watchlist: ~azure.mgmt.securityinsight.models.Watchlist
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Watchlist or the result of cls(response)
:rtype: ~azure.mgmt.securityinsight.models.Watchlist
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def create_or_update(
self,
resource_group_name: str,
workspace_name: str,
watchlist_alias: str,
watchlist: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.Watchlist:
"""Create or update a Watchlist and its Watchlist Items (bulk creation, e.g. through text/csv
content type). To create a Watchlist and its Items, we should call this endpoint with either
        rawContent or a valid SAS URI and contentType properties. The rawContent is mainly used for
        small watchlists (content size below 3.8 MB). The SAS URI enables the creation of large
        watchlists, where the content size can go up to 500 MB. The status of processing such a
        large file can be polled through the URL returned in the Azure-AsyncOperation header.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:param watchlist_alias: Watchlist Alias. Required.
:type watchlist_alias: str
:param watchlist: The watchlist. Required.
:type watchlist: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Watchlist or the result of cls(response)
:rtype: ~azure.mgmt.securityinsight.models.Watchlist
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
workspace_name: str,
watchlist_alias: str,
watchlist: Union[_models.Watchlist, IO],
**kwargs: Any
) -> _models.Watchlist:
"""Create or update a Watchlist and its Watchlist Items (bulk creation, e.g. through text/csv
content type). To create a Watchlist and its Items, we should call this endpoint with either
        rawContent or a valid SAS URI and contentType properties. The rawContent is mainly used for
        small watchlists (content size below 3.8 MB). The SAS URI enables the creation of large
        watchlists, where the content size can go up to 500 MB. The status of processing such a
        large file can be polled through the URL returned in the Azure-AsyncOperation header.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Required.
:type workspace_name: str
:param watchlist_alias: Watchlist Alias. Required.
:type watchlist_alias: str
:param watchlist: The watchlist. Is either a model type or a IO type. Required.
:type watchlist: ~azure.mgmt.securityinsight.models.Watchlist or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Watchlist or the result of cls(response)
:rtype: ~azure.mgmt.securityinsight.models.Watchlist
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Watchlist]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(watchlist, (IO, bytes)):
_content = watchlist
else:
_json = self._serialize.body(watchlist, "Watchlist")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
watchlist_alias=watchlist_alias,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("Watchlist", pipeline_response)
if response.status_code == 201:
response_headers["Azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("Azure-AsyncOperation")
)
deserialized = self._deserialize("Watchlist", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/watchlists/{watchlistAlias}"} # type: ignore
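# --- Illustrative usage (editor's addition, not part of the generated client) ---
# A minimal sketch of the access pattern described in the WatchlistsOperations
# docstring: go through the SecurityInsights client instead of instantiating the
# operations class directly. The azure-identity credential and the resource
# names passed in are assumptions for illustration only.
def _example_list_watchlists(subscription_id, resource_group_name, workspace_name):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.securityinsight import SecurityInsights
    client = SecurityInsights(DefaultAzureCredential(), subscription_id)
    # Iterates the ItemPaged result produced by WatchlistsOperations.list above
    return [w.name for w in client.watchlists.list(resource_group_name, workspace_name)]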
| {
"content_hash": "6996f4ef8389970339e72183d1de8083",
"timestamp": "",
"source": "github",
"line_count": 582,
"max_line_length": 257,
"avg_line_length": 45.91580756013746,
"alnum_prop": 0.6536317030273547,
"repo_name": "Azure/azure-sdk-for-python",
"id": "3a2b8bdce0828317e0883c7962addb287eb0d4ec",
"size": "27223",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/securityinsight/azure-mgmt-securityinsight/azure/mgmt/securityinsight/operations/_watchlists_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""SCons.Tool.Perforce.py
Tool-specific initialization for Perforce Source Code Management system.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/Perforce.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import os
import SCons.Action
import SCons.Builder
import SCons.Node.FS
import SCons.Util
# Variables that we want to import from the base OS environment.
_import_env = [ 'P4PORT', 'P4CLIENT', 'P4USER', 'USER', 'USERNAME', 'P4PASSWD',
'P4CHARSET', 'P4LANGUAGE', 'SystemRoot' ]
PerforceAction = SCons.Action.Action('$P4COM', '$P4COMSTR')
def generate(env):
"""Add a Builder factory function and construction variables for
Perforce to an Environment."""
def PerforceFactory(env=env):
""" """
import SCons.Warnings as W
W.warn(W.DeprecatedSourceCodeWarning, """The Perforce() factory is deprecated and there is no replacement.""")
return SCons.Builder.Builder(action = PerforceAction, env = env)
env.Perforce = PerforceFactory
env['P4'] = 'p4'
env['P4FLAGS'] = SCons.Util.CLVar('')
env['P4COM'] = '$P4 $P4FLAGS sync $TARGET'
try:
environ = env['ENV']
except KeyError:
environ = {}
env['ENV'] = environ
# Perforce seems to use the PWD environment variable rather than
# calling getcwd() for itself, which is odd. If no PWD variable
# is present, p4 WILL call getcwd, but this seems to cause problems
# with good ol' Windows's tilde-mangling for long file names.
environ['PWD'] = env.Dir('#').get_abspath()
for var in _import_env:
v = os.environ.get(var)
if v:
environ[var] = v
if SCons.Util.can_read_reg:
# If we can read the registry, add the path to Perforce to our environment.
try:
k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
'Software\\Perforce\\environment')
val, tok = SCons.Util.RegQueryValueEx(k, 'P4INSTROOT')
SCons.Util.AddPathIfNotExists(environ, 'PATH', val)
except SCons.Util.RegError:
# Can't detect where Perforce is, hope the user has it set in the
# PATH.
pass
def exists(env):
return env.Detect('p4')
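# --- Illustrative usage (editor's addition, not part of the original tool module) ---
# A minimal sketch: the tool is normally pulled in by name through an SCons
# Environment's tools list rather than imported directly; the 'default' plus
# 'Perforce' combination here is an assumption for illustration.
def _example_environment():
    from SCons.Environment import Environment
    env = Environment(tools=['default', 'Perforce'])
    return env['P4COM']  # expands to '$P4 $P4FLAGS sync $TARGET'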
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "408b2f13c4ff8e9702a9a95027463b1e",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 118,
"avg_line_length": 36.83838383838384,
"alnum_prop": 0.6871401151631478,
"repo_name": "pzajda/eloquence",
"id": "47d609658dd2b6099010b52d93cf8896d6db6832",
"size": "3647",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scons-local-2.5.0/SCons/Tool/Perforce.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1927564"
},
{
"name": "Smarty",
"bytes": "226"
}
],
"symlink_target": ""
} |
"""
Copyright 2015 Creare
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages, Extension
from codecs import open # To use a consistent encoding
import platform
from os.path import sep
from os import path
import os
import numpy as np
from Cython.Build import cythonize
here = os.path.join(path.dirname(__file__), 'pydem')
compile_args = []
compile_args.append("-O3")
if '32' in platform.architecture()[0]:
compile_args.append("-march=i386")
else:
compile_args.append("-march=x86-64")
# Pattern functions
path_cyfuncs = os.path.join(here, 'cyfuncs')
path_reader = os.path.join(here, 'reader')
extensions = [
Extension("pydem.cyfuncs.cyutils",
[os.path.join(path_cyfuncs, "cyutils.pyx")],
include_dirs=[np.get_include(), path_cyfuncs],
library_dirs=[],
extra_compile_args=compile_args,
language='c++'),
Extension("pydem.reader.inpaint",
[path_reader + sep + "inpaint.pyx"],
include_dirs=[np.get_include()],
library_dirs=[],
extra_compile_args=compile_args,
language='c++'),
]
setup(
ext_modules=cythonize(extensions),
name='pyDEM',
version='0.1.1',
description="Software for calculating Topographic Wetness Index (TWI)",
author='MPU, RXC',
url="https://github.com/creare-com/pydem",
license="APACHE 2.0",
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: GIS',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
],
packages=find_packages(),
install_requires=[
#'gdal',
'numpy',
'scipy',
'geopy',
'traits',
],
entry_points = {
'console_scripts' : ['TWIDinf=pydem.commandline_utils:TWIDinf',
'AreaDinf=pydem.commandline_utils:AreaDinf',
'DinfFlowDir=pydem.commandline_utils:DinfFlowDir']
}
)
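# --- Illustrative usage (editor's addition, not part of the original setup.py) ---
# A minimal sketch of how the cythonized extensions declared above are typically
# compiled in place during development (standard setuptools/Cython workflow):
#   python setup.py build_ext --inplace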
| {
"content_hash": "aaf9fde5a78a030413a93020b83bc043",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 79,
"avg_line_length": 30.186274509803923,
"alnum_prop": 0.6294251380318285,
"repo_name": "perrygeo/pydem",
"id": "e26ffd1c575652f83becdebd803522ff7f8c5211",
"size": "3079",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "238730"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.enums import HTTP_HEADER
from lib.core.settings import WAF_ATTACK_VECTORS
__product__ = "Incapsula Web Application Firewall (Incapsula/Imperva)"
def detect(get_page):
retval = False
for vector in WAF_ATTACK_VECTORS:
page, headers, code = get_page(get=vector)
retval = re.search(r"incap_ses|visid_incap", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
retval |= re.search(r"Incapsula", headers.get("X-CDN", ""), re.I) is not None
if retval:
break
return retval
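# --- Illustrative usage (editor's addition, not part of the original module) ---
# A minimal offline sketch of the get_page contract detect() relies on above: a
# callable returning (page, headers, code). The stubbed cookie value is made up
# for illustration.
def _example_detect_offline():
    def fake_get_page(get=None):
        headers = {HTTP_HEADER.SET_COOKIE: "visid_incap_123=abc; incap_ses_456=def"}
        return "", headers, 200
    return detect(fake_get_page)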
| {
"content_hash": "badea7c188632db0cf0cda17e7e4b599",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 111,
"avg_line_length": 29.52173913043478,
"alnum_prop": 0.6715758468335787,
"repo_name": "JeyZeta/Dangerous",
"id": "0ba8138a4a25e009f7cd76d684bcb301c433eb5c",
"size": "702",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Dangerous/Golismero/tools/sqlmap/waf/incapsula.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13260"
},
{
"name": "C",
"bytes": "12851"
},
{
"name": "C++",
"bytes": "3174"
},
{
"name": "CSS",
"bytes": "267451"
},
{
"name": "HTML",
"bytes": "2686153"
},
{
"name": "JavaScript",
"bytes": "1356956"
},
{
"name": "Lua",
"bytes": "14436"
},
{
"name": "Makefile",
"bytes": "11190"
},
{
"name": "Objective-C",
"bytes": "998"
},
{
"name": "PHP",
"bytes": "619"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Perl",
"bytes": "263365"
},
{
"name": "Python",
"bytes": "16669102"
},
{
"name": "Roff",
"bytes": "9828"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "6691"
}
],
"symlink_target": ""
} |
import os
import Image, StringIO
import allura
from nose.tools import assert_true
from ming.orm.ormsession import ThreadLocalORMSession
from allura import model as M
from allura.lib import helpers as h
from allura.tests import decorators as td
from alluratest.controller import TestController
from forgewiki import model
#---------x---------x---------x---------x---------x---------x---------x
# RootController methods exposed:
# index, new_page, search
# PageController methods exposed:
# index, edit, history, diff, raw, revert, update
# CommentController methods exposed:
# reply, delete
class TestRootController(TestController):
def setUp(self):
super(TestRootController, self).setUp()
self.setup_with_tools()
@td.with_wiki
def setup_with_tools(self):
pass
def test_root_index(self):
r = self.app.get('/wiki/tést/').follow()
assert 'tést' in r
assert 'Create Page' in r
# No 'Create Page' button if user doesn't have 'create' perm
r = self.app.get('/wiki/tést/',
extra_environ=dict(username='*anonymous')).follow()
assert 'Create Page' not in r
def test_root_markdown_syntax(self):
response = self.app.get('/wiki/markdown_syntax/')
assert 'Markdown Syntax' in response
def test_root_browse_tags(self):
response = self.app.get('/wiki/browse_tags/')
assert 'Browse Labels' in response
def test_root_browse_pages(self):
response = self.app.get('/wiki/browse_pages/')
assert 'Browse Pages' in response
def test_root_new_page(self):
response = self.app.get('/wiki/new_page?title=tést')
assert 'tést' in response
def test_root_new_search(self):
self.app.get('/wiki/tést/')
response = self.app.get('/wiki/search?q=tést')
assert 'Search wiki: tést' in response
def test_page_index(self):
response = self.app.get('/wiki/tést/')
assert 'tést' in response.follow()
def test_page_edit(self):
self.app.get('/wiki/tést/index')
response = self.app.post('/wiki/tést/edit')
assert 'tést' in response
def test_title_slashes(self):
# forward slash not allowed in wiki page title - converted to dash
response = self.app.post(
'/wiki/foo-bar/update',
params={
'title':'foo/bar',
'text':'sometext',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'}).follow()
assert 'foo-bar' in response
assert 'foo-bar' in response.request.url
def test_dotted_page_name(self):
r = self.app.post(
'/wiki/page.dot/update',
params={
'title':'page.dot',
'text':'text1',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'}).follow()
assert 'page.dot' in r
def test_subpage_attempt(self):
self.app.get('/wiki/tést/')
self.app.post(
'/wiki/tést/update',
params={
'title':'tést',
'text':'text1',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
assert '/p/test/wiki/Home/' in self.app.get('/wiki/tést/Home/')
self.app.get('/wiki/tést/notthere/', status=404)
def test_page_history(self):
self.app.get('/wiki/tést/')
self.app.post(
'/wiki/tést/update',
params={
'title':'tést',
'text':'text1',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
self.app.post(
'/wiki/tést/update',
params={
'title':'tést',
'text':'text2',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
response = self.app.get('/wiki/tést/history')
assert 'tést' in response
# two revisions are shown
assert '2 by Test Admin' in response
assert '1 by Test Admin' in response
# you can revert to an old revison, but not the current one
assert response.html.find('a',{'href':'./revert?version=1'})
assert not response.html.find('a',{'href':'./revert?version=2'})
response = self.app.get('/wiki/tést/history', extra_environ=dict(username='*anonymous'))
# two revisions are shown
assert '2 by Test Admin' in response
assert '1 by Test Admin' in response
# you cannot revert to any revision
assert not response.html.find('a',{'href':'./revert?version=1'})
assert not response.html.find('a',{'href':'./revert?version=2'})
def test_page_diff(self):
self.app.post(
'/wiki/tést/update',
params={
'title':'tést',
'text':'sometext',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
self.app.post('/wiki/tést/revert', params=dict(version='1'))
response = self.app.get('/wiki/tést/')
assert 'Subscribe' in response
response = self.app.get('/wiki/tést/diff?v1=0&v2=0')
assert 'tést' in response
def test_page_raw(self):
self.app.post(
'/wiki/TEST/update',
params={
'title':'TEST',
'text':'sometext',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
response = self.app.get('/wiki/TEST/raw')
assert 'TEST' in response
def test_page_revert_no_text(self):
self.app.post(
'/wiki/tést/update',
params={
'title':'tést',
'text':'',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
response = self.app.post('/wiki/tést/revert', params=dict(version='1'))
assert '.' in response.json['location']
response = self.app.get('/wiki/tést/')
assert 'tést' in response
def test_page_revert_with_text(self):
self.app.get('/wiki/tést/')
self.app.post(
'/wiki/tést/update',
params={
'title':'tést',
'text':'sometext',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
response = self.app.post('/wiki/tést/revert', params=dict(version='1'))
assert '.' in response.json['location']
response = self.app.get('/wiki/tést/')
assert 'tést' in response
def test_page_update(self):
self.app.get('/wiki/tést/')
response = self.app.post(
'/wiki/tést/update',
params={
'title':'tést',
'text':'sometext',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
assert 'tést' in response
def test_page_label_unlabel(self):
self.app.get('/wiki/tést/')
response = self.app.post(
'/wiki/tést/update',
params={
'title':'tést',
'text':'sometext',
'labels':'yellow,green',
'labels_old':'yellow,green',
'viewable_by-0.id':'all'})
assert 'tést' in response
response = self.app.post(
'/wiki/tést/update',
params={
'title':'tést',
'text':'sometext',
'labels':'yellow',
'labels_old':'yellow',
'viewable_by-0.id':'all'})
assert 'tést' in response
def test_new_attachment(self):
self.app.post(
'/wiki/tést/update',
params={
'title':'tést',
'text':'sometext',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
content = file(__file__).read()
self.app.post('/wiki/tést/attach', upload_files=[('file_info', 'test_root.py', content)])
response = self.app.get('/wiki/tést/')
assert 'test_root.py' in response
def test_new_text_attachment_content(self):
self.app.post(
'/wiki/tést/update',
params={
'title':'tést',
'text':'sometext',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
file_name = 'test_root.py'
file_data = file(__file__).read()
upload = ('file_info', file_name, file_data)
self.app.post('/wiki/tést/attach', upload_files=[upload])
page_editor = self.app.get('/wiki/tést/edit')
download = page_editor.click(description=file_name)
assert_true(download.body == file_data)
def test_new_image_attachment_content(self):
self.app.post('/wiki/TEST/update', params={
'title':'TEST',
'text':'sometext',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
file_name = 'neo-icon-set-454545-256x350.png'
file_path = os.path.join(allura.__path__[0],'nf','allura','images',file_name)
file_data = file(file_path).read()
upload = ('file_info', file_name, file_data)
self.app.post('/wiki/TEST/attach', upload_files=[upload])
h.set_context('test', 'wiki', neighborhood='Projects')
page = model.Page.query.find(dict(title='TEST')).first()
filename = page.attachments.first().filename
uploaded = Image.open(file_path)
r = self.app.get('/wiki/TEST/attachment/'+filename)
downloaded = Image.open(StringIO.StringIO(r.body))
assert uploaded.size == downloaded.size
r = self.app.get('/wiki/TEST/attachment/'+filename+'/thumb')
thumbnail = Image.open(StringIO.StringIO(r.body))
assert thumbnail.size == (255,255)
# Make sure thumbnail is present
r = self.app.get('/wiki/TEST/')
img_srcs = [ i['src'] for i in r.html.findAll('img') ]
assert ('/p/test/wiki/TEST/attachment/' + filename + '/thumb') in img_srcs, img_srcs
# Update the page to embed the image, make sure the thumbnail is absent
self.app.post('/wiki/TEST/update', params=dict(
title='TEST',
text='sometext\n[[img src=%s alt=]]' % file_name))
r = self.app.get('/wiki/TEST/')
img_srcs = [ i['src'] for i in r.html.findAll('img') ]
assert ('/p/test/wiki/TEST/attachment/' + filename) not in img_srcs, img_srcs
assert ('./attachment/' + file_name) in img_srcs, img_srcs
def test_sidebar_static_page(self):
response = self.app.get('/wiki/tést/')
assert 'Edit this page' not in response
assert 'Related Pages' not in response
def test_related_links(self):
response = self.app.get('/wiki/TEST/').follow()
assert 'Edit TEST' in response
assert 'Related' not in response
self.app.post('/wiki/TEST/update', params={
'title':'TEST',
'text':'sometext',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
self.app.post('/wiki/aaa/update', params={
'title':'aaa',
'text':'',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
self.app.post('/wiki/bbb/update', params={
'title':'bbb',
'text':'',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
h.set_context('test', 'wiki', neighborhood='Projects')
a = model.Page.query.find(dict(title='aaa')).first()
a.text = '\n[TEST]\n'
b = model.Page.query.find(dict(title='TEST')).first()
b.text = '\n[bbb]\n'
ThreadLocalORMSession.flush_all()
M.MonQTask.run_ready()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
response = self.app.get('/wiki/TEST/')
assert 'Related' in response
assert 'aaa' in response
assert 'bbb' in response
def test_show_discussion(self):
self.app.post('/wiki/tést/update', params={
'title':'tést',
'text':'sometext',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
wiki_page = self.app.get('/wiki/tést/')
assert wiki_page.html.find('div',{'id':'new_post_holder'})
options_admin = self.app.get('/admin/wiki/options', validate_chunk=True)
assert options_admin.form['show_discussion'].checked
options_admin.form['show_discussion'].checked = False
options_admin.form.submit()
options_admin2 = self.app.get('/admin/wiki/options', validate_chunk=True)
assert not options_admin2.form['show_discussion'].checked
wiki_page2 = self.app.get('/wiki/tést/')
assert not wiki_page2.html.find('div',{'id':'new_post_holder'})
def test_show_left_bar(self):
self.app.post('/wiki/tést/update', params={
'title':'tést',
'text':'sometext',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
wiki_page = self.app.get('/wiki/tést/')
assert wiki_page.html.find('ul',{'class':'sidebarmenu'})
options_admin = self.app.get('/admin/wiki/options', validate_chunk=True)
assert options_admin.form['show_left_bar'].checked
options_admin.form['show_left_bar'].checked = False
options_admin.form.submit()
options_admin2 = self.app.get('/admin/wiki/options', validate_chunk=True)
assert not options_admin2.form['show_left_bar'].checked
wiki_page2 = self.app.get('/wiki/tést/',extra_environ=dict(username='*anonymous'))
assert not wiki_page2.html.find('ul',{'class':'sidebarmenu'})
wiki_page3 = self.app.get('/wiki/tést/')
assert not wiki_page3.html.find('ul',{'class':'sidebarmenu'})
def test_show_metadata(self):
self.app.post('/wiki/tést/update', params={
'title':'tést',
'text':'sometext',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
wiki_page = self.app.get('/wiki/tést/')
assert wiki_page.html.find('div',{'class':'editbox'})
options_admin = self.app.get('/admin/wiki/options', validate_chunk=True)
assert options_admin.form['show_right_bar'].checked
options_admin.form['show_right_bar'].checked = False
options_admin.form.submit()
options_admin2 = self.app.get('/admin/wiki/options', validate_chunk=True)
assert not options_admin2.form['show_right_bar'].checked
wiki_page2 = self.app.get('/wiki/tést/')
assert not wiki_page2.html.find('div',{'class':'editbox'})
def test_edit_mount_label(self):
r = self.app.get('/admin/wiki/edit_label', validate_chunk=True)
assert r.form['mount_label'].value == 'Wiki'
r = self.app.post('/admin/wiki/update_label', params=dict(
mount_label='Tricky Wiki'))
r = self.app.get('/admin/wiki/edit_label', validate_chunk=True)
assert r.form['mount_label'].value == 'Tricky Wiki'
def test_page_links_are_colored(self):
self.app.get('/wiki/space%20page/')
params = {
'title':'space page',
'text':'''There is a space in the title!''',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'}
self.app.post('/wiki/space%20page/update', params=params)
self.app.get('/wiki/TEST/')
params = {
'title':'TEST',
'text':'''
* Here is a link to [this page](TEST)
* Here is a link to [another page](Some page which does not exist)
* Here is a link to [space page space](space page)
* Here is a link to [space page escape](space%20page)
* Here is a link to [TEST]
* Here is a link to [Some page which does not exist]
* Here is a link to [space page]
* Here is a link to [space%20page]
''',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'}
self.app.post('/wiki/TEST/update', params=params)
r = self.app.get('/wiki/TEST/')
found_links = 0
for link in r.html.findAll('a'):
if link.contents == ['this page']:
assert 'notfound' not in link.get('class', '')
found_links +=1
if link.contents == ['another page']:
assert 'notfound' in link.get('class', '')
found_links +=1
if link.contents == ['space page space']:
assert 'notfound' not in link.get('class', '')
found_links +=1
if link.contents == ['space page escape']:
assert 'notfound' not in link.get('class', '')
found_links +=1
if link.contents == ['[TEST]']:
assert 'notfound' not in link.get('class', '')
found_links +=1
if link.contents == ['[Some page which does not exist]']:
assert 'notfound' in link.get('class', '')
found_links +=1
if link.contents == ['[space page]']:
assert 'notfound' not in link.get('class', '')
found_links +=1
if link.contents == ['[space%20page]']:
assert 'notfound' not in link.get('class', '')
found_links +=1
assert found_links == 8, 'Wrong number of links found'
def test_home_rename(self):
assert 'The resource was found at http://localhost/p/test/wiki/Home/;' in self.app.get('/p/test/wiki/')
req = self.app.get('/p/test/wiki/Home/edit')
req.forms[1]['title'].value = 'new_title'
req.forms[1].submit()
assert 'The resource was found at http://localhost/p/test/wiki/new_title/;' in self.app.get('/p/test/wiki/')
def test_page_delete(self):
self.app.post('/wiki/aaa/update', params={
'title':'aaa',
'text':'',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
self.app.post('/wiki/bbb/update', params={
'title':'bbb',
'text':'',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'})
response = self.app.get('/wiki/browse_pages/')
assert 'aaa' in response
assert 'bbb' in response
self.app.post('/wiki/bbb/delete')
response = self.app.get('/wiki/browse_pages/')
assert 'aaa' in response
assert '?deleted=True">bbb' in response
def test_mailto_links(self):
self.app.get('/wiki/test_mailto/')
params = {
'title':'test_mailto',
'text':'''
* Automatic mailto #1 <[email protected]>
* Automatic mailto #2 <mailto:[email protected]>
* Handmaid mailto <a href="mailto:[email protected]">Email Yoda</a>
''',
'labels':'',
'labels_old':'',
'viewable_by-0.id':'all'}
self.app.post('/wiki/test_mailto/update', params=params)
r = self.app.get('/wiki/test_mailto/')
mailto_links = 0
for link in r.html.findAll('a'):
if link.get('href') == 'mailto:[email protected]':
assert 'notfound' not in link.get('class', '')
mailto_links +=1
if link.get('href') == 'mailto:[email protected]':
assert 'notfound' not in link.get('class', '')
mailto_links += 1
if link.get('href') == 'mailto:[email protected]':
assert link.contents == ['Email Yoda']
assert 'notfound' not in link.get('class', '')
mailto_links += 1
assert mailto_links == 3, 'Wrong number of mailto links'
| {
"content_hash": "2118ff6b32c3f2106a6fcc904df99927",
"timestamp": "",
"source": "github",
"line_count": 517,
"max_line_length": 116,
"avg_line_length": 39.22050290135397,
"alnum_prop": 0.5258174286137003,
"repo_name": "leotrubach/sourceforge-allura",
"id": "2f2bfa9107f39021967caef78d4518d5b59ff822",
"size": "20377",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "ForgeWiki/forgewiki/tests/functional/test_root.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "D",
"bytes": "2985957"
},
{
"name": "JavaScript",
"bytes": "650950"
},
{
"name": "Puppet",
"bytes": "2677"
},
{
"name": "Python",
"bytes": "1866436"
},
{
"name": "Ruby",
"bytes": "4109"
},
{
"name": "Shell",
"bytes": "6636"
}
],
"symlink_target": ""
} |
'''
@author: m0t
'''
# Search for blocks colored purple (0x9933cc) and create a disabled breakpoint at the start of each.
# To be used with process stalker to immediately see "interesting" blocks.
from idc import *
from idautils import *
purple = 0x9933cc #our definition of purple...
#get start address of each function, scan it for purple, setbreakpoint()
funit = Functions()
prevFlag = False
while True:
try:
faddr = funit.next()
except StopIteration:
break
itemsit = FuncItems(faddr)
while True:
try:
item = itemsit.next()
except StopIteration:
break
if GetColor(item, 1) == purple and prevFlag == False:
AddBpt(item)
EnableBpt(item, False)
prevFlag = True
#resetting the flag when we go out of "interesting" block
if GetColor(item, 1) != purple and prevFlag == True:
prevFlag = False
| {
"content_hash": "7e980b980195133e331a9762424ddee7",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 99,
"avg_line_length": 23.685714285714287,
"alnum_prop": 0.7141133896260555,
"repo_name": "m0t/ida-scripts",
"id": "5eff4f616e1bf74af785abd760f50d2a582846ad",
"size": "829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mark_blocks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6385"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from interview_project.models import House, Owner
admin.site.register(House)
admin.site.register(Owner)
| {
"content_hash": "73e6c48795692f6e274ea6dbae078497",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 49,
"avg_line_length": 27.6,
"alnum_prop": 0.8260869565217391,
"repo_name": "nick-garcia/interview_project",
"id": "071a07f8cdf164e48b016cff878edd33e94d5334",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interview_project/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10770"
}
],
"symlink_target": ""
} |
import tempfile
from pyontutils.config import auth
__doc__ = f"""Client library generator for SciGraph REST api.
Usage:
scigraph-codegen [options] [--dynamic=<PATH>...]
Options:
-o --output-file=FILE save client library here [default: {tempfile.tempdir}/scigraph_client.py]
-a --api=API API endpoint to build from [default: {auth.get('scigraph-api')}]
-v --scigraph-version=VER API docs version [default: 2]
-b --basepath=BASEPATH alternate default basepath [default: https://scicrunch.org/api/1/sparc-scigraph]
-d --dynamic=<PATH> additional servers to search for dynamic endpoints
"""
import re
import copy
import inspect
import requests
class restService:
""" Base class for SciGraph rest services. """
_api_key = None
_hrx = re.compile('^https?://')
def __init__(self, cache=False, safe_cache=False, key=None, do_error=False):
self._session = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_connections=1000, pool_maxsize=1000)
self._session.mount('http://', adapter)
self._do_error = do_error
if cache:
#print('WARNING: cache enabled, if you mutate the contents of return values you will mutate the cache!')
self._cache = dict()
if safe_cache:
self._get = self._safe_cache_get
else:
self._get = self._cache_get
else:
self._get = self._normal_get
if key is not None:
self.api_key = key
raise DeprecationWarning('this way of passing keys will be deprecated soon')
@property
def api_key(self):
return self._api_key
@api_key.setter
def api_key(self, value):
self._api_key = value
def __del__(self):
self._session.close()
def _safe_url(self, url):
return url.replace(self.api_key, '[secure]') if self.api_key else url
@property
def _last_url(self):
return self._safe_url(self.__last_url)
def _normal_get(self, method, url, params=None, output=None):
s = self._session
if self.api_key is not None:
params['key'] = self.api_key
if method == 'POST':
req = requests.Request(method=method, url=url, data=params)
else:
req = requests.Request(method=method, url=url, params=params)
if output:
req.headers['Accept'] = output
prep = req.prepare()
if self._verbose: print(self._safe_url(prep.url))
try:
resp = s.send(prep)
self.__last_url = resp.url
except requests.exceptions.ConnectionError as e:
host_port = prep.url.split(prep.path_url)[0]
raise ConnectionError(f'Could not connect to {host_port}. '
'Are SciGraph services running?') from e
if resp.status_code == 401:
raise ConnectionError(f'{resp.reason}. '
f'Did you set {self.__class__.__name__}.api_key'
' = my_api_key?')
elif not resp.ok:
if self._do_error:
resp.raise_for_status()
else:
return None
elif resp.headers['content-type'] == 'application/json':
return resp.json()
elif resp.headers['content-type'].startswith('text/plain'):
return resp.text
else:
return resp
def _cache_get(self, method, url, params=None, output=None):
if params:
pkey = '?' + '&'.join(['%s=%s' % (k,v) for k,v in sorted(params.items()) if v is not None])
else:
pkey = ''
key = url + pkey + ' ' + method + ' ' + str(output)
if key in self._cache:
if self._verbose:
print('cache hit', key)
self.__last_url, resp = self._cache[key]
else:
resp = self._normal_get(method, url, params, output)
self._cache[key] = self.__last_url, resp
return resp
def _safe_cache_get(self, *args, **kwargs):
""" If cached values might be used in a context where they
could be mutated, then safe_cache = True should be set
and this wrapper will protect the output """
return copy.deepcopy(self._cache_get(*args, **kwargs)) # prevent mutation of the cache
def _make_rest(self, default=None, **kwargs):
kwargs = {k:v for k, v in kwargs.items() if v}
param_rest = '&'.join(['%s={%s}' % (arg, arg) for arg in kwargs if arg != default])
param_rest = param_rest if param_rest else ''
return param_rest
class SUBCLASS:
@classmethod
def make(cls):
code = inspect.getsource(cls).replace('SUBCLASS', cls.__name__ + 'Base')
return '\n\n' + code
class Cypher(SUBCLASS):
@staticmethod
def fix_quotes(string, s1=':["', s2='"],'):
out = []
def subsplit(sstr, s=s2):
#print(s)
if s == '",' and sstr.endswith('"}'): # special case for end of record
s = '"}'
if s:
string, *rest = sstr.rsplit(s, 1)
else:
string = sstr
rest = '',
if rest:
#print('>>>>', string)
#print('>>>>', rest)
r, = rest
if s == '"],':
fixed_string = Cypher.fix_quotes(string, '","', '') + s + r
else:
fixed_string = string.replace('"', r'\"') + s + r
return fixed_string
for sub1 in string.split(s1):
ss = subsplit(sub1)
if ss is None:
if s1 == ':["':
out.append(Cypher.fix_quotes(sub1, ':"', '",'))
else:
out.append(sub1)
else:
out.append(ss)
return s1.join(out)
def fix_cypher(self, record):
rep = re.sub(r'({|, )(\S+)(: "|: \[)', r'\1"\2"\3',
self.fix_quotes(record.strip()).
split(']', 1)[1] .
replace(':"', ': "') .
replace(':[', ': [') .
replace('",', '", ') .
replace('"],', '"], ') .
replace('\n', '\\n') .
replace('xml:lang="en"', r'xml:lang=\"en\"')
)
try:
value = {self.qname(k):v for k, v in literal_eval(rep).items()}
except (ValueError, SyntaxError) as e:
print(repr(record))
print(repr(rep))
raise e
return value
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._setCuries()
def _setCuries(self):
try:
self._curies = self.getCuries()
except ConnectionError:
self._curies = {}
self._inv = {v:k for k, v in self._curies.items()}
@property
def api_key(self):
# note that using properties means that
# if you want to use properties at all in
# a subClass hierarchy you have to reimplement
        # them every single time to be aware of when the
        # parent class value changes
if isinstance(restService.api_key, str):
return restService.api_key
else:
return self._api_key
@api_key.setter
def api_key(self, value):
old_key = self.api_key
self._api_key = value
if old_key is None and value is not None:
self._setCuries()
def qname(self, iri):
for prefix, curie in self._inv.items():
if iri.startswith(prefix):
return iri.replace(prefix, curie + ':')
else:
return iri
def execute(self, query, limit, output='text/plain', **kwargs):
if output == 'text/plain':
out = super().execute(query, limit, output, **kwargs)
rows = []
if out:
for raw in out.split('|')[3:-1]:
record = raw.strip()
if record:
d = self.fix_cypher(record)
rows.append(d)
return rows
else:
return super().execute(query, limit, output, **kwargs)
class Dynamic(SUBCLASS):
@staticmethod
def _path_to_id(path):
return (path.strip('/')
.replace('dynamic/', '')
.replace('{', '')
.replace('}', '')
.replace('/', '_')
.replace('-', '_'))
def _path_function_arg(self, path):
if '?' in path:
path, query = path.split('?', 1)
kwargs = parse_qs(query)
else:
kwargs = {}
if '.' in path:
# FIXME logic seems bad ...
if ':' not in path or path.index('.') > path.index(':'):
raise ValueError('extensions not supported directly please use output=mimetype')
if ':' in path: # curie FIXME way more potential arguments here ...
key = lambda s: len(s)
args = []
puts = []
while ':' in path:
path, arg = path.rsplit('/', 1)
args.append(arg)
base = self._path_to_id(path)
putative = self._path_to_id(path + '/{')
if ':' not in putative:
puts.append(putative)
args.reverse() # args are parsed backwards
cands = sorted([p for p in dir(self) if p.startswith(puts[0])], key=key)
if len(cands) > 1:
effs = [getattr(self, self._path_to_id(c)) for c in cands]
specs = [inspect.getargspec(f) for f in effs]
lens = [len(s.args) - 1 - len(s.defaults) for s in specs]
largs = len(args)
new_cands = []
for c, l in zip(cands, lens):
if l == largs:
new_cands.append(c)
if len(new_cands) > 1:
raise TypeError('sigh')
cands = new_cands
elif not cands:
raise ValueError(f'{self._basePath} does not have endpoints matching {path}')
fname = cands[0]
else:
arg = None
args = []
fname = self._path_to_id(path)
if not hasattr(self, fname):
raise ValueError(f'{self._basePath} does not have endpoint {path} -> {fname!r}')
return getattr(self, fname), args, kwargs
def dispatch(self, path, output='application/json', **kwargs):
f, args, query_kwargs = self._path_function_arg(path)
kwargs.update(query_kwargs)
try:
return f(*args, output=output, **kwargs) if args else f(output=output, **kwargs)
except TypeError as e:
raise TypeError('Did you remember to set parameters in the services config?') from e
class Graph(SUBCLASS):
@staticmethod
def ordered(start, edges, predicate=None, inverse=False):
""" Depth first edges from a SciGraph response. """
s, o = 'sub', 'obj'
if inverse:
s, o = o, s
edges = list(edges)
for edge in tuple(edges):
if predicate is not None and edge['pred'] != predicate:
print('scoop!')
continue
if edge[s] == start:
yield edge
edges.remove(edge)
yield from Graph.ordered(edge[o], edges, predicate=predicate)
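# A minimal sketch of using Graph.ordered on a SciGraph-style edge list (assumes the
# response edges carry 'sub', 'pred' and 'obj' keys, as the method above reads them):
#
#   edges = [{'sub': 'A', 'pred': 'partOf', 'obj': 'B'},
#            {'sub': 'B', 'pred': 'partOf', 'obj': 'C'}]
#   for edge in Graph.ordered('A', edges):
#       print(edge['sub'], edge['pred'], edge['obj'])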
class CLASSNAME(restService):
""" DOCSTRING """
def __init__(self, basePath=None, verbose=False, cache=False, safe_cache=False, key=None, do_error=False):
if basePath is None:
basePath = BASEPATH
self._basePath = basePath
self._verbose = verbose
super().__init__(cache=cache, safe_cache=safe_cache, key=key, do_error=do_error)
class FAKECLASS:
def NICKNAME(selfPARAMSDEFAULT_OUTPUTKWARGS):
""" DOCSTRING
"""
{params_conditional}
kwargs = {param_rest}
{dict_comp}
url = self._basePath + ('{path}').format(**kwargs)
requests_params = {dict_comp2}
output = self._get('{method}', url, requests_params, {output})
return output if output else {empty_return_type}
@staticmethod
def make():
code = inspect.getsource(FAKECLASS.NICKNAME)
code = code.replace('requests_params, ', 'requests_params')
code = code.replace(' {params_conditional}','{params_conditional}')
for name in ('NICKNAME','PARAMS','DEFAULT_OUTPUT', 'DOCSTRING', 'KWARGS'):
code = code.replace(name, '{' + name.lower() + '}')
return code
operation_code = FAKECLASS.make()
class State:
def __init__(self, api_url, basepath=None, dynamics=tuple()):
# TODO autopopulate from subclasses
self._dynamics = dynamics
self.classname = None
self._subclasses = {sc.__name__:sc for sc in SUBCLASS.__subclasses__()}
self.shebang = "#!/usr/bin/env python3\n"
self.imports = ('import re\n'
'import copy\n'
'import inspect\n'
'import builtins\n'
'from urllib.parse import parse_qs\n'
'import requests\n'
'from ast import literal_eval\n'
'from json import dumps\n'
'from urllib import parse\n\n')
self._basepath = basepath if basepath is not None else api_url.rsplit('/', 1)[0]
self.api_url = api_url
self.current_path = self.api_url
self.exten_mapping = {}
self._paths = {}
self.globs = {}
self.tab = ' '
self.gencode()
def code(self):
return self.make_main()
def make_main(self):
code = ""
code += self.shebang
code += self.make_doc()
code += self.imports
code += f'BASEPATH = {self._basepath!r}\n\n'
code += "exten_mapping = {%s}\n\n" % ', '.join(["'" + '\': \''.join(_) + "'" for _ in sorted(self.exten_mapping.items())])
code += self.make_baseclass()
code += self._code
code += '\n'
return code
def make_doc(self):
code = ('"""WARNING: DO NOT MODIFY THIS FILE\n'
'IT IS AUTOMATICALLY GENERATED BY scigraph.py\n'
'AND WILL BE OVERWRITTEN\n'
'Swagger Version: {swaggerVersion}, API Version: {apiVersion}\n'
'generated for {api_url}\nby scigraph.py\n"""\n')
swaggerVersion = self.globs['swaggerVersion']
apiVersion = self.globs['apiVersion']
return code.format(swaggerVersion=swaggerVersion, apiVersion=apiVersion, api_url=self.api_url, t=self.tab)
def make_baseclass(self):
return inspect.getsource(restService) + '\n'
def make_class(self, dict_):
code = '\n' + inspect.getsource(CLASSNAME) + '\n'
classname = dict_['resourcePath'].strip('/').capitalize()
docstring = dict_['docstring']
if classname in self._subclasses:
self.classname = classname # FIXME ICK
classname = classname + 'Base'
print('Generating:', classname)
#_, basePath = self.basePath_(dict_['basePath'])
return (code.replace('CLASSNAME', classname)
.replace('DOCSTRING', docstring)
.replace("'BASEPATH'", 'BASEPATH'))
def make_subclass(self):
if self.classname in self._subclasses:
subclass = self._subclasses[self.classname]
subclass_code = subclass.make()
self.classname = None
return subclass_code
else:
return ''
def make_param_parts(self, dict_):
if dict_['required']:
#param_args = '{name}'
#param_args = param_args.format(name=dict_['name'])
param_args = dict_['name']
required = param_args
else:
param_args = "{name}={defaultValue}"
dv = dict_.get('defaultValue', None)
if dv:
try:
dv = int(dv)
except ValueError:
if dv == 'true':
dv = 'True'
elif dv == 'false':
dv = 'False'
else:
dv = "'%s'" % dv
param_args = param_args.format(name=dict_['name'], defaultValue=dv)
required = None
#param_rest = '{name}'
#param_rest = param_rest.format(name=dict_['name'])
param_rest = dict_['name']
param_doc = '{t}{t}{t}{name}:{description}'
desc = dict_.get('description','')
LIMIT = 65
if len(desc) > LIMIT:
desc = desc.replace('>', '> ||').replace('<', '|| <')
tmp = desc.split(' ')
lines = []
line = None
for token in tmp:
if not line:
line = token
elif len(line) + len(' ' + token) > LIMIT:
lines.append(line)
line = token
else:
line += ' ' + token
if line not in lines:
if len(line) < 10:
lines[-1] += ' ' + line
else:
lines.append(line)
space = (' ' * (len(dict_['name']) + 2))
desc = '\n{t}{t}{t}{space}'.format(t=self.tab, space=space).join([l for l in lines if l])
desc = desc.replace('> ||', '>').replace('|| <', '<').replace('||', '')
desc = ' ' + desc if desc else desc
param_doc = param_doc.format(name=dict_['name'], description=desc, t=self.tab)
return param_args, param_rest, param_doc, required
def make_params(self, list_):
pargs_list, prests, pdocs = [], [], []
required = None
needs_kwargs = False
for param in list_:
if 'schema' in param: # skip 'body' entries, they cause problems
continue
parg, prest, pdoc, put_required = self.make_param_parts(param)
if put_required:
required = "'%s'" % put_required # XXX fail for multi required?
pargs_list.append(parg)
prests.append(prest)
pdocs.append(pdoc)
if param['name'] == 'cypherQuery':
needs_kwargs = True
if pargs_list:
pargs = ', ' + ', '.join(pargs_list)
else:
pargs = ''
pkeys = prests
kwargs = ', **kwargs' if needs_kwargs else ''
if prests:
prests = '{' + ', '.join([f'{pr!r}: {pr}' for pr in prests]) + kwargs + '}'
else:
prests = '{}'
pdocs = '\n'.join(pdocs)
return pargs, prests, pdocs, required, pkeys, needs_kwargs
def make_return(self, api_dict):
return_type = None
if 'type' in api_dict:
return_type = api_dict['type'] # array or other (Graph, etc)
print(return_type)
elif 'responses' in api_dict:
resps = api_dict['responses']
if '200' in resps:
scm = resps['200']['schema']
if 'type' in scm:
return_type = scm['type']
if return_type is None:
print(f' No return type for {api_dict["operationId"]}')
type_return_dict = { # TODO source from asdf['definitions'] for 2.0
'array': '[]',
'object': '{}',
'string': None,
'Annotations': '[]', # bug in docs
'Graph': "{'nodes':[], 'edges':[]}", # risky
'ConceptDTO': None, # better None than empty dict
'RefineResult': None, # TODO
'AnalyzerResult' :None, # TODO
None:None,
}
return type_return_dict[return_type]
def apiVersion(self, value):
self.globs['apiVersion'] = value
return None, ''
def swaggerVersion(self, value):
self.globs['swaggerVersion'] = value
return None, ''
def operation(self, api_dict):
params, param_rest, param_docs, required, pkeys, needs_kwargs = self.make_params(api_dict['parameters'])
dict_comp = (('kwargs = {k:dumps(v) if builtins.type(v) '
'is dict else v for k, v in kwargs.items()}')
# json needs " not '
if param_rest != '{}' else '# type caste not needed')
empty_return_type = self.make_return(api_dict)
nickname = api_dict['nickname']
path = self._paths[nickname]
docstring = (api_dict.get('summary', '') +
' from: ' +
path +
'\n\n{t}{t}{t}Arguments:\n'.format(t=self.tab) +
param_docs)
if 'x-query' in api_dict:
_p = '{t}{t}{t}'.format(t=self.tab)
_query = api_dict['x-query'].replace('\n', '\n' + _p)
docstring += '\n\n{p}Query:\n{p}{q}'.format(p=_p, q=_query)
docstring = docstring.rstrip() + '\n'
# handle whether required is in the url
if required:
if '{' + required.strip("'") + '}' not in path:
required = None
if required:
dict_comp2 = '{k:v for k, v in kwargs.items() if k != %s}' % required
else:
dict_comp2 = 'kwargs'
params_conditional = ''
for key in pkeys:
#if [_ for _ in ('id', 'url', 'type', 'relationship') if _ in key]:
# FIXME detect this from the parameter type ...
if key in ('id', 'artifact_id', 'species_id',
'region_id', 'species-id', 'fma_id', 'root_id'):
cond = key
params_conditional += (
"\n{t}{t}if {cond} and self._hrx.match({cond}):\n"
"{t}{t}{t}{cond} = parse.quote({cond}, safe='')").format(cond=cond, t=self.tab)
if 'produces' in api_dict: # ICK but the alt is nastier
outputs, default_output = self.make_produces(api_dict['produces'])
docstring += outputs
output = ', output'
else:
default_output = ''
output = ''
kwargs = ', **kwargs' if needs_kwargs else ''
method = api_dict['method']
if '{' in path and '-' in path: # FIXME hack
before, after = path.split('{', 1) # use split since there can be multiple paths
path = before + '{' + after.replace('-', '_')
formatted = operation_code.format(
path=path, nickname=nickname, params=params, param_rest=param_rest,
dict_comp=dict_comp, dict_comp2=dict_comp2, method=method,
docstring=docstring, required=required, default_output=default_output, kwargs=kwargs,
params_conditional=params_conditional, output=output, t=self.tab,
empty_return_type=empty_return_type)
        self.dodict(api_dict) # catch any stateful things we need, but we aren't generating code from it
return formatted
def description(self, value):
return None, ''
def resourcePath(self, value):
return None, ''
def top_path(self, extension):
newpath = self.api_url + extension
json = requests.get(newpath).json()
return json
def path(self, value):
# if anything do substitution here
# need something extra here?
return None, ''
def apis(self, list_):
print(' Starting ...')
try:
for api in list_:
if 'operations' in api:
for operation in api['operations']:
self._paths[operation['nickname']] = api['path']
except:
raise BaseException
return None, self.dolist(list_)
def models(self, dict_):
return None, self.dodict(dict_)
def Features(self, dict_):
self.dodict(dict_)
return None, ''
def Graph(self, dict_):
self.dodict(dict_)
return None, ''
def properties(self, dict_):
return None, self.dodict(dict_)
def operations(self, list_):
self.context = 'operations'
code = '\n'.join(self.operation(l) for l in list_)
return None, code
def produces(self, list_):
return None, ''
def make_produces(self, list_):
# we make return option here including the docstring
for mimetype in list_:
self.exten_mapping[mimetype] = mimetype.split('/')[-1]
outputs = '\n{t}{t}{t}outputs:\n{t}{t}{t}{t}'
outputs += '\n{t}{t}{t}{t}'.join(list_)
default_output = ', output=\'{output}\''.format(output=list_[0])
return outputs.format(t=self.tab), default_output # FIXME there MUST be a better way to deal with the bloody {t} all at once
def basePath_(self, value):
dirs = value.split('/')
curs = self.api_url.split('/')
for d in dirs:
if d == '..':
curs = curs[:-1]
else:
curs.append(d)
return None, '/'.join(curs)
def dolist(self, list_):
blocks = []
def sortkey(d):
if 'path' in d:
return d['path']
elif 'nickname' in d:
return d['nickname']
elif 'name' in d:
return d['name']
else:
return 0
list_.sort(key=sortkey)
for dict_ in list_:
code = self.dodict(dict_)
blocks.append(code)
return '\n'.join([b for b in blocks if b])
def dodict(self, dict_):
blocks = []
methods = {k:v for k, v in inspect.getmembers(self) if k != 'code' and inspect.ismethod(v)} # ismethod calls properties :/
for key, value in dict_.items():
#print('trying with key:', key)
if key in methods:
#name, code = methods[key](self, value)
name, code = methods[key](value)
blocks.append(code)
else:
#print('METHOD', key, 'NOT FOUND')
pass
return '\n'.join([b for b in blocks if b])
def class_json(self, dict_):
code = self.make_class(dict_)
methods = self.dodict(dict_)
subclass_code = self.make_subclass()
if methods:
code += methods
else:
code += ' # No methods exist for this API endpoint.\n'
return None, code + subclass_code
def dotopdict(self, dict_):
for api in dict_['apis']:
json = self.top_path(api['path'])
json['docstring'] = api['description']
api['class_json'] = json
return dict_
def gencode(self):
""" Run this to generate the code """
resp = requests.get(self.api_url)
if not resp.ok:
if resp.status_code == 401 and 'scicrunch.org' in self.api_url:
resp = requests.get(
self.api_url,
params={'key': auth.get('scigraph-api-key')})
else:
resp.raise_for_status()
ledict = resp.json()
for durl in self._dynamics:
dj = requests.get(durl).json()
for p in dj['paths']:
if p.startswith('/dynamic') and p not in ledict['paths']:
ledict['paths'][p] = dj['paths'][p]
ledict = self.dotopdict(ledict)
out = self.dodict(ledict)
self._code = out
class State2(State):
path_prefix = ''
dynamic_produces = [
'application/json',
'application/graphson',
'application/xml',
'application/graphml+xml',
'application/xgmml',
'text/gml',
'text/csv',
'text/tab-separated-values',
'image/jpeg',
'image/png',
]
def dotopdict(self, dict_):
""" Rewrite the 2.0 json to match what we feed the code for 1.2 """
mlookup = {'get':'GET', 'post':'POST'}
def rearrange(path, method_dict, method):
oid = method_dict['operationId']
self._paths[oid] = path
method_dict['nickname'] = oid
method_dict['method'] = mlookup[method]
paths = dict_['paths']
for path, path_dict in paths.items():
if path.startswith('/dynamic'):
#if '{' in path:
#operationId = path.split('/{', 1)[0].rsplit('/', 1)[-1]
operationId = Dynamic._path_to_id(path)
xq = path_dict.pop('x-query')
for k in tuple(path_dict):
if k.startswith('x-'):
print(f'Removed unknown key: {k}')
path_dict.pop(k)
for method_dict in path_dict.values():
method_dict['operationId'] = operationId
method_dict['x-query'] = xq
method_dict['produces'] = self.dynamic_produces
method_dict['path'] = path # FIXME ??
for pd in method_dict['parameters']:
pd['name'] = pd['name'].replace('-', '_')
elif self.path_prefix and self.path_prefix not in path:
continue
path_dict['operations'] = []
for method, method_dict in sorted(path_dict.items()):
if method == 'operations':
continue
rearrange(path, method_dict, method)
#print(self.operation(method_dict))
path_dict['operations'].append(method_dict)
path_dict['path'] = path
def setp(v, lenp=len(self.path_prefix)):
v['path'] = v['path'][lenp:]
return v
dict_['apis'] = []
for tag_dict in dict_['tags']:
path = '/' + tag_dict['name']
d = {'path':path,
'description':tag_dict['description'],
'class_json':{
'docstring':tag_dict['description'],
'resourcePath':path,
'apis':[setp(v) for k, v in paths.items()
if k.startswith(self.path_prefix + path)]},
}
dict_['apis'].append(d)
# make sure this is run first so we don't get key errors
self._swagger(dict_['swagger'])
self._info(dict_['info'])
self._definitions(dict_['definitions'])
return dict_
def _swagger(self, string):
self.globs['swaggerVersion'] = string
return None, ''
def _info(self, dict_):
self._version(dict_['version'])
return None, ''
def _version(self, string):
self.globs['apiVersion'] = string
return None, ''
def _definitions(self, dict_):
self._return_defs = dict_
return None, ''
def title(self, string):
return None, ''
def tags(self, list_):
return None, ''
def moduleDirect(basepath, module_name, *, version=2):
""" Avoid the need for dynamics altogether """
if version < 2:
state = State
docs_path = 'api-docs'
else:
state = State2
docs_path = 'swagger.json'
api_url = f'{basepath}/{docs_path}'
s = state(api_url, basepath)
code = s.code()
return importDirect(code, module_name)
def importDirect(code, module_name):
from types import ModuleType
compiled = compile(code, '', 'exec')
module = ModuleType(module_name)
exec(compiled, module.__dict__)
return module
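# Example use of moduleDirect (a sketch; the endpoint URL and module name are
# placeholders, and the attributes available on the returned module depend on the
# service's swagger.json):
#
#   client_module = moduleDirect('https://scigraph.example.org/scigraph', 'scigraph_client')
#   graph_client = client_module.Graph()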
def main():
from docopt import docopt
from docopt import parse_defaults
defaults = {o.name:o.value if o.argcount else None for o in parse_defaults(__doc__)}
args = docopt(__doc__, version='scigraph-codegen 1.0.0')
ssd = 'https://scicrunch.org/swagger-docs'
if args['--api'] == defaults['--basepath']:
args['--api'] = ssd
if args['--api'] == 'https://scicrunch.org/api/1/scigraph':
args['--api'] = ssd
if args['--api'] == ssd:
State2.path_prefix = '/scigraph'
output_file, api, version, basepath = (
args['--' + k]
for k in ('output-file', 'api', 'scigraph-version', 'basepath'))
version = int(version)
basepath = None if basepath == 'default' else basepath
if version < 2:
state = State
docs_path = 'api-docs'
else:
state = State2
docs_path = 'swagger.json'
api_url = f'{api}/{docs_path}'
print(api_url)
dynamics = [f'{d}/swagger.json' for d in args['--dynamic']]
if dynamics:
print('dynamics:', dynamics)
s = state(api_url, basepath, dynamics=dynamics)
code = s.code()
with open(output_file, 'wt') as f:
f.write(code)
import os
os.system(f'python {output_file}')
if __name__ == '__main__':
main()
| {
"content_hash": "7f6ab323bfb946b2f6d1f4ef74f75c48",
"timestamp": "",
"source": "github",
"line_count": 973,
"max_line_length": 134,
"avg_line_length": 34.10791366906475,
"alnum_prop": 0.5060113900021093,
"repo_name": "tgbugs/pyontutils",
"id": "536df437ce35ebaf4c9f4b8fadba8adc51b2140f",
"size": "33210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyontutils/scigraph_codegen.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "1847"
},
{
"name": "HTML",
"bytes": "40561"
},
{
"name": "JavaScript",
"bytes": "702"
},
{
"name": "Jupyter Notebook",
"bytes": "831901"
},
{
"name": "Lua",
"bytes": "8298"
},
{
"name": "Python",
"bytes": "2278543"
},
{
"name": "Racket",
"bytes": "72332"
},
{
"name": "Shell",
"bytes": "10505"
}
],
"symlink_target": ""
} |
from PySide import QtCore, QtGui
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(318, 130)
self.g1 = QtGui.QGroupBox(Dialog)
self.g1.setGeometry(QtCore.QRect(10, 10, 301, 80))
self.g1.setObjectName("g1")
self.lblIP = QtGui.QLabel(self.g1)
self.lblIP.setGeometry(QtCore.QRect(60, 20, 46, 13))
self.lblIP.setObjectName("lblIP")
self.entIP = QtGui.QLineEdit(self.g1)
self.entIP.setGeometry(QtCore.QRect(120, 20, 113, 20))
self.entIP.setObjectName("entIP")
self.label = QtGui.QLabel(self.g1)
self.label.setGeometry(QtCore.QRect(10, 50, 101, 16))
self.label.setObjectName("label")
self.entPort = QtGui.QLineEdit(self.g1)
self.entPort.setGeometry(QtCore.QRect(120, 50, 113, 20))
self.entPort.setObjectName("entPort")
self.btnConnect = QtGui.QPushButton(Dialog)
self.btnConnect.setGeometry(QtCore.QRect(10, 100, 101, 23))
self.btnConnect.setObjectName("btnConnect")
self.btnCancel = QtGui.QPushButton(Dialog)
self.btnCancel.setGeometry(QtCore.QRect(200, 100, 101, 23))
self.btnCancel.setObjectName("btnCancel")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
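        # The literals below are Russian UI strings; roughly: "Подключение к серверу" =
        # "Connection to server", "Параметры подключения" = "Connection settings",
        # "Адрес IP" = "IP address", "Порт подключения" = "Connection port",
        # "Подключиться" = "Connect", "Отмена" = "Cancel".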
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Подключение к серверу", None, QtGui.QApplication.UnicodeUTF8))
self.g1.setTitle(QtGui.QApplication.translate("Dialog", "Параметры подключения", None, QtGui.QApplication.UnicodeUTF8))
self.lblIP.setText(QtGui.QApplication.translate("Dialog", "Адрес IP", None, QtGui.QApplication.UnicodeUTF8))
self.entIP.setText(QtGui.QApplication.translate("Dialog", "127.0.0.1", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Dialog", "Порт подключения", None, QtGui.QApplication.UnicodeUTF8))
self.entPort.setText(QtGui.QApplication.translate("Dialog", "38752", None, QtGui.QApplication.UnicodeUTF8))
self.btnConnect.setText(QtGui.QApplication.translate("Dialog", "Подключиться", None, QtGui.QApplication.UnicodeUTF8))
self.btnCancel.setText(QtGui.QApplication.translate("Dialog", "Отмена", None, QtGui.QApplication.UnicodeUTF8))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Dialog = QtGui.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| {
"content_hash": "d11c462b01cd9eafd9bb1f23812acb03",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 132,
"avg_line_length": 50.05882352941177,
"alnum_prop": 0.6897767332549941,
"repo_name": "prospero78/pyTrans",
"id": "6bae8ea915f6ba4317f2aad40ad834d47bc29bff",
"size": "2975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Client/pakTransClient/pakGui/pakWinConnect/Ui_resWinConnect.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "249"
},
{
"name": "Python",
"bytes": "35776"
},
{
"name": "Visual Basic",
"bytes": "253"
}
],
"symlink_target": ""
} |
from random import randint
from unittest import TestCase
from rapidtest import Case, Result
from rapidtest.executors import Operation, Operations
from rapidtest.utils import nop, identity, randints
class TestCase_(TestCase):
def test_process_args(self):
c = Case()
res = c.process_args([1, 2, 3], False)
self.assertEqual(res, Operations((), [Operation(None, [1, 2, 3], True)]))
res = c.process_args([
['a', 2, 'c'],
'push', [1, 2, 3],
'pop', Result(1),
'count',
'count',
'pop', Result('d'),
'push', [[4, 5]], Result([0]),
'len'
], True)
self.assertEqual(res, Operations(('a', 2, 'c'), [
Operation('push', [1, 2, 3], False),
Operation('pop', collect=True),
Operation('count'),
Operation('count'),
Operation('pop', collect=True),
Operation('push', [[4, 5]], True),
Operation('len'),
]))
STRS = [
([], r'no args were specified'),
([
[1, 2, 3],
[]
], r'expected.*, got \[\]'),
([
[1, 2, 3],
'a', [], [1]
], r'expected.*, got \[1\]'),
([
'a', Result(1), Result(2)
], r'expected.*, got Result\(2\)'),
([
Result(1),
], r'got Result\(1\)'),
([
[1, 2, 3],
Result(1)
], r'got Result\(1\)'),
([
'1', Result('b'), [1]
], r'got \[1\]'),
]
for args, pat in STRS:
with self.assertRaisesRegexp(ValueError, pat):
c.process_args(args, True)
with self.assertRaisesRegexp(ValueError, r'no args'):
c.process_args([], True)
with self.assertRaisesRegexp(ValueError, r'no method call'):
c.process_args([[]], True)
def test__initialize(self):
with self.assertRaisesRegexp(RuntimeError, r'target.*specified.*neither'):
Case('append', Result(1), operation=True)._initialize()
with self.assertRaisesRegexp(RuntimeError, r'both'):
Case('append', Result(1), result=1, target=nop)._initialize()
with self.assertRaisesRegexp(RuntimeError, r'both'):
Case('append', Result(2), operation=True, result=None, target=nop)._initialize()
Case('append', result=1, target=nop)._initialize()
with self.assertRaisesRegexp(RuntimeError, r'object.*not specified'):
Case('append', operation=True, target=nop)._initialize()
with self.assertRaisesRegexp(RuntimeError, r'result.*not specified'):
Case('append', target=nop)._initialize()
Case('append', Result(2), operation=True, target=nop)._initialize()
with self.assertRaisesRegexp(RuntimeError, r'keyword.*target.*operation is True'):
Case('append', operation=True, result=None, target=nop)._initialize()
Case('append', operation=True, result=list, target=nop)._initialize()
with self.assertRaises(AttributeError):
Case('a', operation=True, result=list, target=nop)._initialize()
with self.assertRaisesRegexp(RuntimeError, r'object.*not accepted'):
Case('append', Result(2), target=nop)._initialize()
def test_preprocess_in_place(self):
f = Case.preprocess_in_place(True)
self.assertEqual(f, identity)
f = Case.preprocess_in_place(False)
self.assertIsNone(f)
for i in range(1, 100):
args = randints(i, max_num=i * 100)
idx = randint(0, i - 1)
f = Case.preprocess_in_place(idx)
self.assertEqual(f(args), args[idx])
indices = randints(randint(1, i), unique=True, max_num=i - 1)
f = Case.preprocess_in_place(indices)
self.assertEqual(f(args), [args[idx] for idx in indices])
with self.assertRaises(TypeError):
Case.preprocess_in_place('123')
with self.assertRaises(TypeError):
Case.preprocess_in_place(['123'])
with self.assertRaises(TypeError):
Case.preprocess_in_place('')
with self.assertRaises(TypeError):
Case.preprocess_in_place(1.1)
with self.assertRaises(TypeError):
Case.preprocess_in_place([1.1])
with self.assertRaises(ValueError):
Case.preprocess_in_place([])
| {
"content_hash": "020b8e8a83beac6dbaf9006f5dabba63",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 92,
"avg_line_length": 35.49612403100775,
"alnum_prop": 0.5317754968333698,
"repo_name": "yehzhang/RapidTest",
"id": "81f206f9e91239b82535a9183c437658c9d72ac5",
"size": "4579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_case.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "30481"
},
{
"name": "Python",
"bytes": "102077"
}
],
"symlink_target": ""
} |
"""Error codes - copied from the PS6000 programmer's manual."""
# To get the formatting correct, do the following copy-replaces in
# Programmers Notepad
# 1. Copy/replace ' - ' with '", "'
# 2. Copy/replace '\r' with '"],\r' (enable slash expressions when doing so)
# 3. Copy/replace '^([0-9A-F]{2} ){1}' with '0x\1, "' (w/ regex)
# 4. Copy/replace '^([0-9A-F]{3} ){1}' with '0x\1, "' (w/ regex)
# 5. Copy/replace '0x' with '[0x'
ERROR_CODES = [
[0x00, "PICO_OK", "The PicoScope XXXX is functioning correctly."],
[0x01, "PICO_MAX_UNITS_OPENED",
"An attempt has been made to open more than PSXXXX_MAX_UNITS."],
[0x02, "PICO_MEMORY_FAIL",
"Not enough memory could be allocated on the host machine."],
[0x03, "PICO_NOT_FOUND", "No PicoScope XXXX could be found."],
[0x04, "PICO_FW_FAIL", "Unable to download firmware."],
[0x05, "PICO_OPEN_OPERATION_IN_PROGRESS"],
[0x06, "PICO_OPERATION_FAILED"],
[0x07, "PICO_NOT_RESPONDING",
"The PicoScope XXXX is not responding to commands from the PC."],
[0x08, "PICO_CONFIG_FAIL",
"The configuration information in the PicoScope XXXX has become " +
"corrupt or is missing."],
[0x09, "PICO_KERNEL_DRIVER_TOO_OLD",
"The picopp.sys file is too old to be used with the device driver."],
[0x0A, "PICO_EEPROM_CORRUPT",
"The EEPROM has become corrupt, so the device will use a default " +
"setting."],
[0x0B, "PICO_OS_NOT_SUPPORTED",
"The operating system on the PC is not supported by this driver."],
[0x0C, "PICO_INVALID_HANDLE",
"There is no device with the handle value passed."],
[0x0D, "PICO_INVALID_PARAMETER", "A parameter value is not valid."],
[0x0E, "PICO_INVALID_TIMEBASE",
"The timebase is not supported or is invalid."],
[0x0F, "PICO_INVALID_VOLTAGE_RANGE",
"The voltage range is not supported or is invalid."],
[0x10, "PICO_INVALID_CHANNEL",
"The channel number is not valid on this device or no channels have " +
"been set."],
[0x11, "PICO_INVALID_TRIGGER_CHANNEL",
"The channel set for a trigger is not available on this device."],
[0x12, "PICO_INVALID_CONDITION_CHANNEL",
"The channel set for a condition is not available on this device."],
[0x13, "PICO_NO_SIGNAL_GENERATOR",
"The device does not have a signal generator."],
[0x14, "PICO_STREAMING_FAILED",
"Streaming has failed to start or has stopped without user request."],
[0x15, "PICO_BLOCK_MODE_FAILED",
"Block failed to start", "a parameter may have been set wrongly."],
[0x16, "PICO_NULL_PARAMETER", "A parameter that was required is NULL."],
[0x18, "PICO_DATA_NOT_AVAILABLE",
"No data is available from a run block call."],
[0x19, "PICO_STRING_BUFFER_TOO_SMALL",
"The buffer passed for the information was too small."],
[0x1A, "PICO_ETS_NOT_SUPPORTED", "ETS is not supported on this device."],
[0x1B, "PICO_AUTO_TRIGGER_TIME_TOO_SHORT",
"The auto trigger time is less than the time it will take to collect " +
"the pre-trigger data."],
[0x1C, "PICO_BUFFER_STALL",
"The collection of data has stalled as unread data would be " +
"overwritten."],
[0x1D, "PICO_TOO_MANY_SAMPLES",
"Number of samples requested is more than available in the current " +
"memory segment."],
[0x1E, "PICO_TOO_MANY_SEGMENTS",
"Not possible to create number of segments requested."],
[0x1F, "PICO_PULSE_WIDTH_QUALIFIER",
"A null pointer has been passed in the trigger function or one of the " +
"parameters is out of range."],
[0x20, "PICO_DELAY",
"One or more of the hold-off parameters are out of range."],
[0x21, "PICO_SOURCE_DETAILS",
"One or more of the source details are incorrect."],
[0x22, "PICO_CONDITIONS", "One or more of the conditions are incorrect."],
[0x23, "PICO_USER_CALLBACK",
"The driver's thread is currently in the psXXXXBlockReady callback " +
"function and therefore the action cannot be carried out."],
[0x24, "PICO_DEVICE_SAMPLING",
"An attempt is being made to get stored data while streaming. " +
"Either stop streaming by calling psXXXXStop, or use " +
"psXXXXGetStreamingLatestValues."],
[0x25, "PICO_NO_SAMPLES_AVAILABLE",
"because a run has not been completed."],
[0x26, "PICO_SEGMENT_OUT_OF_RANGE",
"The memory index is out of range."],
[0x27, "PICO_BUSY", "Data cannot be returned yet."],
[0x28, "PICO_STARTINDEX_INVALID",
"The start time to get stored data is out of range."],
[0x29, "PICO_INVALID_INFO",
"The information number requested is not a valid number."],
[0x2A, "PICO_INFO_UNAVAILABLE",
"The handle is invalid so no information is available about the device." +
" Only PICO_DRIVER_VERSION is available."],
[0x2B, "PICO_INVALID_SAMPLE_INTERVAL",
"The sample interval selected for streaming is out of range."],
[0x2D, "PICO_MEMORY", "Driver cannot allocate memory."],
[0x2E, "PICO_SIG_GEN_PARAM",
"Incorrect parameter passed to signal generator."],
[0x34, "PICO_WARNING_AUX_OUTPUT_CONFLICT",
"AUX cannot be used as input and output at the same time."],
[0x35, "PICO_SIGGEN_OUTPUT_OVER_VOLTAGE",
"The combined peak to peak voltage and the analog offset voltage " +
"exceed the allowable voltage the signal generator can produce."],
[0x36, "PICO_DELAY_NULL", "NULL pointer passed as delay parameter."],
[0x37, "PICO_INVALID_BUFFER",
"The buffers for overview data have not been set while streaming."],
[0x38, "PICO_SIGGEN_OFFSET_VOLTAGE",
"The analog offset voltage is out of range."],
[0x39, "PICO_SIGGEN_PK_TO_PK",
"The analog peak to peak voltage is out of range."],
[0x3A, "PICO_CANCELLED", "A block collection has been cancelled."],
[0x3B, "PICO_SEGMENT_NOT_USED",
"The segment index is not currently being used."],
[0x3C, "PICO_INVALID_CALL",
"The wrong GetValues function has been called for the collection mode " +
"in use."],
[0x3F, "PICO_NOT_USED", "The function is not available."],
[0x40, "PICO_INVALID_SAMPLERATIO",
"The aggregation ratio requested is out of range."],
[0x41, "PICO_INVALID_STATE",
"Device is in an invalid state."],
[0x42, "PICO_NOT_ENOUGH_SEGMENTS",
"The number of segments allocated is fewer than the number of captures " +
"requested."],
[0x43, "PICO_DRIVER_FUNCTION",
"You called a driver function while another driver function was still " +
"being processed."],
[0x45, "PICO_INVALID_COUPLING",
"An invalid coupling type was specified in psXXXXSetChannel."],
[0x46, "PICO_BUFFERS_NOT_SET",
"An attempt was made to get data before a data buffer was defined."],
[0x47, "PICO_RATIO_MODE_NOT_SUPPORTED",
"The selected downsampling mode (used for data reduction) is not " +
"allowed."],
[0x49, "PICO_INVALID_TRIGGER_PROPERTY",
"An invalid parameter was passed to psXXXXSetTriggerChannelProperties."],
[0x4A, "PICO_INTERFACE_NOT_CONNECTED",
"The driver was unable to contact the oscilloscope."],
[0x4D, "PICO_SIGGEN_WAVEFORM_SETUP_FAILED",
"A problem occurred in psXXXXSetSigGenBuiltIn or " +
"psXXXXSetSigGenArbitrary."],
[0x4E, "PICO_FPGA_FAIL"],
[0x4F, "PICO_POWER_MANAGER"],
[0x50, "PICO_INVALID_ANALOGUE_OFFSET",
"An impossible analogue offset value was specified in psXXXXSetChannel."],
[0x51, "PICO_PLL_LOCK_FAILED",
"Unable to configure the PicoScope XXXX."],
[0x52, "PICO_ANALOG_BOARD",
"The oscilloscope's analog board is not detected, or is not connected " +
"to the digital board."],
[0x53, "PICO_CONFIG_FAIL_AWG",
"Unable to configure the signal generator."],
[0x54, "PICO_INITIALISE_FPGA",
"The FPGA cannot be initialized, so unit cannot be opened."],
[0x56, "PICO_EXTERNAL_FREQUENCY_INVALID",
"The frequency for the external clock is not within ±5% of the " +
"stated value."],
[0x57, "PICO_CLOCK_CHANGE_ERROR",
"The FPGA could not lock the clock signal."],
[0x58, "PICO_TRIGGER_AND_EXTERNAL_CLOCK_CLASH",
"You are trying to configure the AUX input as both a trigger and a " +
"reference clock."],
[0x59, "PICO_PWQ_AND_EXTERNAL_CLOCK_CLASH",
"You are trying to configure the AUX input as both a pulse width " +
"qualifier and a reference clock."],
[0x5A, "PICO_UNABLE_TO_OPEN_SCALING_FILE",
"The scaling file set can not be opened."],
[0x5B, "PICO_MEMORY_CLOCK_FREQUENCY",
"The frequency of the memory is reporting incorrectly."],
[0x5C, "PICO_I2C_NOT_RESPONDING",
"The I2C that is being actioned is not responding to requests."],
[0x5D, "PICO_NO_CAPTURES_AVAILABLE",
"There are no captures available and therefore no data can be returned."],
[0x5E, "PICO_NOT_USED_IN_THIS_CAPTURE_MODE",
"The capture mode the device is currently running in does not support " +
"the current request."],
[0x103, "PICO_GET_DATA_ACTIVE", "Reserved"],
[0x104, "PICO_IP_NETWORKED", "The device is currently connected via " +
"the IP Network socket and thus the call made is not supported."],
[0x105, "PICO_INVALID_IP_ADDRESS", "An IP address that is not correct " +
"has been passed to the driver."],
[0x106, "PICO_IPSOCKET_FAILED", "The IP socket has failed."],
[0x107, "PICO_IPSOCKET_TIMEDOUT", "The IP socket has timed out."],
[0x108, "PICO_SETTINGS_FAILED", "The settings requested have failed to " +
"be set."],
[0x109, "PICO_NETWORK_FAILED", "The network connection has failed."],
[0x10A, "PICO_WS2_32_DLL_NOT_LOADED", "Unable to load the WS2 dll."],
[0x10B, "PICO_INVALID_IP_PORT", "The IP port is invalid."],
[0x10C, "PICO_COUPLING_NOT_SUPPORTED",
"The type of coupling requested is not supported on the opened device."],
[0x10D, "PICO_BANDWIDTH_NOT_SUPPORTED",
"Bandwidth limit is not supported on the opened device."],
[0x10E, "PICO_INVALID_BANDWIDTH",
"The value requested for the bandwidth limit is out of range."],
[0x10F, "PICO_AWG_NOT_SUPPORTED",
"The device does not have an arbitrary waveform generator."],
[0x110, "PICO_ETS_NOT_RUNNING",
"Data has been requested with ETS mode set but run block has not been " +
"called, or stop has been called."],
[0x111, "PICO_SIG_GEN_WHITENOISE_NOT_SUPPORTED",
"White noise is not supported on the opened device."],
[0x112, "PICO_SIG_GEN_WAVETYPE_NOT_SUPPORTED",
"The wave type requested is not supported by the opened device."],
[0x116, "PICO_SIG_GEN_PRBS_NOT_SUPPORTED",
"Siggen does not generate pseudorandom bit stream."],
[0x117, "PICO_ETS_NOT_AVAILABLE_WITH_LOGIC_CHANNELS",
"When a digital port is enabled, ETS sample mode is not available for " +
"use."],
[0x118, "PICO_WARNING_REPEAT_VALUE", "Not applicable to this device."],
[0x119, "PICO_POWER_SUPPLY_CONNECTED",
"The DC power supply is connected."],
[0x11A, "PICO_POWER_SUPPLY_NOT_CONNECTED",
"The DC power supply isn’t connected."],
[0x11B, "PICO_POWER_SUPPLY_REQUEST_INVALID",
"Incorrect power mode passed for current power source."],
[0x11C, "PICO_POWER_SUPPLY_UNDERVOLTAGE",
"The supply voltage from the USB source is too low."],
[0x11D, "PICO_CAPTURING_DATA",
"The device is currently busy capturing data."],
[0x11E, "PICO_USB3_0_DEVICE_NON_USB3_0_PORT",
"You must connect the device to a USB 3.0 port, or call " +
"ps4000aChangePowerSource to switch the device into " +
"non-USB 3.0-power mode"],
[0x11F, "PICO_NOT_SUPPORTED_BY_THIS_DEVICE",
"A function has been called that is not supported by the current " +
"device variant."],
[0x120, "PICO_INVALID_DEVICE_RESOLUTION",
"The device resolution is invalid (out of range)."],
[0x121, "PICO_INVALID_NUMBER_CHANNELS_FOR_RESOLUTION",
"The number of channels which can be enabled is limited in " +
"15 and 16-bit modes"],
[0x122, "PICO_CHANNEL_DISABLED_DUE_TO_USB_POWERED",
"USB Power not sufficient to power all channels."]]
| {
"content_hash": "2d66efb470786753de34f689960cb5bf",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 79,
"avg_line_length": 51.67659574468085,
"alnum_prop": 0.6655961791831357,
"repo_name": "arunpersaud/pico-python",
"id": "47cd7c0dbc3b7ef28b33685343cfe716ec30dcb9",
"size": "12147",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "picoscope/error_codes.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "412"
},
{
"name": "Python",
"bytes": "132761"
}
],
"symlink_target": ""
} |
import threading
import socket
import time
class WorkerThread(threading.Thread):
# task_queue = queue.Queue(maxsize=1)
def __init__(self, task_q, serial_task_q):
threading.Thread.__init__(self)
global task_queue
task_queue = task_q
global serial_task_queue
serial_task_queue = serial_task_q
def run(self):
while True:
task = task_queue.get(block=True).decode('ascii')
if not threading.main_thread().is_alive():
break
if task.startswith('L'):
# L<(LED_NUM).(RED).(GREEN).(BLUE)>*(#ofLEDSused)
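                # e.g. "L<0.255.0.0><1.0.255.0>" would set LED 0 to red and LED 1 to
                # green (an illustrative message; actual values come from the client)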
is_led_phrase = False
led_phrase_counter = 0
led_phrases = []
for i in task:
if i == '<':
is_led_phrase = True
continue
if i == '>':
is_led_phrase = False
led_phrase_counter = led_phrase_counter + 1
continue
if is_led_phrase:
# led_phrases[led_phrase_counter] = led_phrases[led_phrase_counter] + i
try:
led_phrases[led_phrase_counter] = led_phrases[led_phrase_counter] + i
except IndexError:
led_phrases.append(i)
for led_phrase in led_phrases:
section_tracker = 0
taskb = []
for i in led_phrase:
if i == '.':
section_tracker = section_tracker + 1
continue
try:
taskb[section_tracker] = taskb[section_tracker] + i
except IndexError:
taskb.append(i)
taskc = []
try:
for i in range(0, 4):
taskc.append(int(taskb[i]))
                    # skip any phrase that does not parse into four integer fields
                    except (TypeError, ValueError, IndexError):
continue
serial_task_queue.put(taskc)
# Special tasks go here
task_queue.task_done()
if not threading.main_thread().is_alive():
break
class SerialWorkerThread(threading.Thread):
def __init__(self, serial_task_q, ser, max_leds):
threading.Thread.__init__(self)
global serial_task_queue
serial_task_queue = serial_task_q
global serial_device
serial_device = ser
global max_leds_used
max_leds_used = max_leds
def run(self):
while True:
task = serial_task_queue.get(block=True)
if not threading.main_thread().is_alive():
break
if -1 < task[0] < max_leds_used and -1 < task[1] < 256 \
and -1 < task[2] < 256 and -1 < task[3] < 256:
led = task[0]
red = task[1]
green = task[2]
blue = task[3]
serial_device.write(led.to_bytes(1, byteorder='big'))
serial_device.write(red.to_bytes(1, byteorder='big'))
serial_device.write(green.to_bytes(1, byteorder='big'))
serial_device.write(blue.to_bytes(1, byteorder='big'))
serial_task_queue.task_done()
if not threading.main_thread().is_alive():
break
class StopThreads(threading.Thread):
def __init__(self, serial_task_q, task_q):
threading.Thread.__init__(self)
global task_queue
task_queue = task_q
global serial_task_queue
serial_task_queue = serial_task_q
def run(self):
time.sleep(1)
# Connect to server socket to unblock and thus check if main thread is up
while threading.main_thread().is_alive():
time.sleep(1)
host = socket.gethostname()
port = 23734
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send(''.encode('ascii'))
s.close()
# Send empty task to queues to unblock and thus check if main thread is up
global serial_task_queue
serial_task_queue.put('')
global task_queue
task_queue.put(''.encode('ascii'))
| {
"content_hash": "c28a4fc0b26150ea6fd6dd49a9c59a7c",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 97,
"avg_line_length": 32.43382352941177,
"alnum_prop": 0.4801632282929041,
"repo_name": "Basecrayfish/LEDServer",
"id": "a72236f7ff7757a47792c2e50778f17deca0aee4",
"size": "4411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "worker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12898"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
('posts', '0004_auto_20151106_0329'),
]
operations = [
migrations.AddField(
model_name='post',
name='profile',
field=models.ForeignKey(to='users.UserProfile', default=1),
),
]
| {
"content_hash": "0be3e83dbaa169d4437631c0d46f8e0a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 71,
"avg_line_length": 22.57894736842105,
"alnum_prop": 0.5827505827505828,
"repo_name": "tuanquanghpvn/bideox",
"id": "11f86235e664a5731df60279bff2a56fb4b81f26",
"size": "453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/posts/migrations/0005_post_profile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "503317"
},
{
"name": "HTML",
"bytes": "1808267"
},
{
"name": "JavaScript",
"bytes": "2866114"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "35633"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from chatpro.rooms.models import Room
from dash.orgs.models import Org
from dash.utils import intersection
from dash.utils.sync import ChangeType
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
from temba.types import Contact as TembaContact
from uuid import uuid4
from .tasks import push_contact_change
class AbstractParticipant(models.Model):
full_name = models.CharField(verbose_name=_("Full name"), max_length=128, null=True)
chat_name = models.CharField(verbose_name=_("Chat name"), max_length=16, null=True,
help_text=_("Shorter name used for chat messages"))
class Meta:
abstract = True
class Contact(AbstractParticipant):
"""
Corresponds to a RapidPro contact who is tied to a single room
"""
uuid = models.CharField(max_length=36, unique=True)
org = models.ForeignKey(Org, verbose_name=_("Organization"), related_name='contacts')
room = models.ForeignKey(Room, verbose_name=_("Room"), related_name='contacts',
help_text=_("Room which this contact belongs in"))
urn = models.CharField(verbose_name=_("URN"), max_length=255)
is_active = models.BooleanField(default=True, help_text=_("Whether this contact is active"))
created_by = models.ForeignKey(User, null=True, related_name="contact_creations",
help_text="The user which originally created this item")
created_on = models.DateTimeField(auto_now_add=True,
help_text="When this item was originally created")
modified_by = models.ForeignKey(User, null=True, related_name="contact_modifications",
help_text="The user which last modified this item")
modified_on = models.DateTimeField(auto_now=True,
help_text="When this item was last modified")
@classmethod
def create(cls, org, user, full_name, chat_name, urn, room, uuid=None):
if org.id != room.org_id: # pragma: no cover
raise ValueError("Room does not belong to org")
# if we don't have a UUID, then we created this contact
if not uuid:
do_push = True
uuid = unicode(uuid4())
else:
do_push = False
# create contact
contact = cls.objects.create(org=org, full_name=full_name, chat_name=chat_name, urn=urn, room=room, uuid=uuid,
created_by=user, modified_by=user)
if do_push:
contact.push(ChangeType.created)
return contact
@classmethod
def kwargs_from_temba(cls, org, temba_contact):
org_room_uuids = [r.uuid for r in Room.get_all(org)]
room_uuids = intersection(org_room_uuids, temba_contact.groups)
room = Room.objects.get(org=org, uuid=room_uuids[0]) if room_uuids else None
if not room:
raise ValueError("No room with uuid in %s" % ", ".join(temba_contact.groups))
return dict(org=org,
full_name=temba_contact.name,
chat_name=temba_contact.fields.get(org.get_chat_name_field(), None),
urn=temba_contact.urns[0],
room=room,
uuid=temba_contact.uuid)
def as_temba(self):
temba_contact = TembaContact()
temba_contact.name = self.full_name
temba_contact.urns = [self.urn]
temba_contact.fields = {self.org.get_chat_name_field(): self.chat_name}
temba_contact.groups = [self.room.uuid]
temba_contact.uuid = self.uuid
return temba_contact
def push(self, change_type):
push_contact_change.delay(self.id, change_type)
def get_urn(self):
return tuple(self.urn.split(':', 1))
def release(self):
self.is_active = False
self.save()
self.push(ChangeType.deleted)
def as_participant_json(self):
return dict(id=self.id, type='C', full_name=self.full_name, chat_name=self.chat_name)
def __unicode__(self):
if self.full_name:
return self.full_name
elif self.chat_name:
return self.chat_name
else:
return self.get_urn()[1]
class Profile(AbstractParticipant):
"""
Extension for the user class
"""
user = models.OneToOneField(User)
change_password = models.BooleanField(default=False, help_text=_("User must change password on next login"))
def as_participant_json(self):
return dict(id=self.user_id, type='U', full_name=self.full_name, chat_name=self.chat_name)
| {
"content_hash": "0cbd1fd52139d7e8549ad1e9b369bc3c",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 118,
"avg_line_length": 37.1328125,
"alnum_prop": 0.6225541763096991,
"repo_name": "rapidpro/chatpro",
"id": "f8b15f8afd5806b97eaab8b231b7fdb6c99ca614",
"size": "4753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chatpro/profiles/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2638"
},
{
"name": "CoffeeScript",
"bytes": "12697"
},
{
"name": "HTML",
"bytes": "22196"
},
{
"name": "Python",
"bytes": "142459"
}
],
"symlink_target": ""
} |
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_lldp
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage LLDP configuration on VyOS network devices
description:
- This module provides declarative management of LLDP service
on VyOS network devices.
notes:
- Tested against VYOS 1.1.7
options:
state:
description:
- State of the LLDP configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: vyos
"""
EXAMPLES = """
- name: Enable LLDP service
vyos_lldp:
state: present
- name: Disable LLDP service
vyos_lldp:
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- set service lldp
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.vyos.vyos import get_config, load_config
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
def has_lldp(module):
config = get_config(module).splitlines()
if "set service 'lldp'" in config or 'set service lldp' in config:
return True
else:
return False
def main():
""" main entry point for module execution
"""
argument_spec = dict(
interfaces=dict(type='list'),
state=dict(default='present',
choices=['present', 'absent',
'enabled', 'disabled'])
)
argument_spec.update(vyos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
HAS_LLDP = has_lldp(module)
commands = []
if module.params['state'] == 'absent' and HAS_LLDP:
commands.append('delete service lldp')
elif module.params['state'] == 'present' and not HAS_LLDP:
commands.append('set service lldp')
result['commands'] = commands
if commands:
commit = not module.check_mode
load_config(module, commands, commit=commit)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| {
"content_hash": "9a790a23e42cf4ece9dff15df1efde1e",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 93,
"avg_line_length": 24.7,
"alnum_prop": 0.6404858299595142,
"repo_name": "SergeyCherepanov/ansible",
"id": "88159ea4c4af5db9e2eddd28d9a193f95306d820",
"size": "3217",
"binary": false,
"copies": "56",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/network/vyos/vyos_lldp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
"""The device tracker tests for the Mazda Connected Services integration."""
from homeassistant.components.device_tracker import SOURCE_TYPE_GPS
from homeassistant.components.device_tracker.const import ATTR_SOURCE_TYPE
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_LATITUDE,
ATTR_LONGITUDE,
)
from homeassistant.helpers import entity_registry as er
from . import init_integration
async def test_device_tracker(hass):
"""Test creation of the device tracker."""
await init_integration(hass)
entity_registry = er.async_get(hass)
state = hass.states.get("device_tracker.my_mazda3_device_tracker")
assert state
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "My Mazda3 Device Tracker"
assert state.attributes.get(ATTR_ICON) == "mdi:car"
assert state.attributes.get(ATTR_LATITUDE) == 1.234567
assert state.attributes.get(ATTR_LONGITUDE) == -2.345678
assert state.attributes.get(ATTR_SOURCE_TYPE) == SOURCE_TYPE_GPS
entry = entity_registry.async_get("device_tracker.my_mazda3_device_tracker")
assert entry
assert entry.unique_id == "JM000000000000000"
| {
"content_hash": "d887123fa0ab5127e7ea3df908b2c03e",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 81,
"avg_line_length": 38.233333333333334,
"alnum_prop": 0.7436791630340017,
"repo_name": "toddeye/home-assistant",
"id": "4af367c1c04b1e5c0aa54c03a43f9a78d3602c84",
"size": "1147",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/mazda/test_device_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'EnginePatch.migrate_engine_equivalent_patch'
db.delete_column(u'physical_enginepatch', 'migrate_engine_equivalent_patch_id')
def backwards(self, orm):
# Adding field 'EnginePatch.migrate_engine_equivalent_patch'
db.add_column(u'physical_enginepatch', 'migrate_engine_equivalent_patch',
self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'backwards_engine_patch', null=True, to=orm['physical.EnginePatch'], on_delete=models.SET_NULL, blank=True),
keep_default=False)
models = {
u'physical.cloud': {
'Meta': {'object_name': 'Cloud'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'backup_hour': ('django.db.models.fields.IntegerField', [], {}),
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'engine_patch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EnginePatch']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'maintenance_day': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'maintenance_window': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'ssl_configured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.databaseinfraparameter': {
'Meta': {'unique_together': "((u'databaseinfra', u'parameter'),)", 'object_name': 'DatabaseInfraParameter'},
'applied_on_database': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.DatabaseInfra']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Parameter']"}),
'reset_default_value': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'major_version': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'minor_version': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'physical.enginepatch': {
'Meta': {'object_name': 'EnginePatch'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'patchs'", 'to': u"orm['physical.Engine']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_initial_patch': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'patch_path': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'patch_version': ('django.db.models.fields.PositiveIntegerField', [], {}),
'required_disk_size_gb': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.enginetype': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'cloud': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'environment_cloud'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Cloud']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environmentgroup': {
'Meta': {'object_name': 'EnvironmentGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'groups'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Offering']", 'null': 'True'}),
'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
'root_size_gb': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ssl_expire_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'total_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'physical.offering': {
'Meta': {'object_name': 'Offering'},
'cpus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'offerings'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'memory_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.parameter': {
'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
'allowed_values': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'migrate_engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'stronger_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'main_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'weaker_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'weaker_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"})
},
u'physical.planattribute': {
'Meta': {'object_name': 'PlanAttribute'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recreate_slave': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_setup_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.script': {
'Meta': {'object_name': 'Script'},
'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'metric_collector': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.topologyparametercustomvalue': {
'Meta': {'unique_together': "((u'topology', u'parameter'),)", 'object_name': 'TopologyParameterCustomValue'},
'attr_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'topology_custom_values'", 'to': u"orm['physical.Parameter']"}),
'topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'param_custom_values'", 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.vip': {
'Meta': {'object_name': 'Vip'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'infra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'vips'", 'to': u"orm['physical.DatabaseInfra']"}),
'original_vip': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Vip']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.volume': {
'Meta': {'object_name': 'Volume'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'volumes'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'total_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
    complete_apps = ['physical']
| {
"content_hash": "e95dab7f7b459c99fcba0f6a09ec85b6",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 239,
"avg_line_length": 94.0105633802817,
"alnum_prop": 0.5653395258249373,
"repo_name": "globocom/database-as-a-service",
"id": "06bb158d0aee23dc8e389c5a979072e353e0eb9d",
"size": "26723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbaas/physical/migrations/0092_auto__del_field_enginepatch_migrate_engine_equivalent_patch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "243568"
},
{
"name": "Dockerfile",
"bytes": "1372"
},
{
"name": "HTML",
"bytes": "310401"
},
{
"name": "JavaScript",
"bytes": "988830"
},
{
"name": "Makefile",
"bytes": "5199"
},
{
"name": "Python",
"bytes": "9674426"
},
{
"name": "Shell",
"bytes": "215115"
}
],
"symlink_target": ""
} |
import bouncebox.core.api as bb
import pandas as pd
class TestEventComponent(bb.Component):
listeners = [(bb.Event, "handle_event")]
def __init__(self):
super(TestEventComponent, self).__init__()
self.sum = 0
def handle_event(self, event):
        print(event)
def end(self, _event):
        print('THE END')
ind = pd.date_range(start="2000-01-01", freq="D", periods=10)
# create events
events = (bb.Event(ts) for ts in ind)
box = bb.BounceBox()
# add source, EventBroadcaster will broadcast an iterable
source = bb.EventBroadcaster(events)
box.add_source(source)
comp = TestEventComponent()
box.add_component(comp)
box.start_box()
| {
"content_hash": "8aad4b15ea7ae9fbacaa2eec9245a5ab",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 61,
"avg_line_length": 22.366666666666667,
"alnum_prop": 0.6736214605067065,
"repo_name": "dalejung/bouncebox",
"id": "336f3ae37ab299f23f254c9f4301590c31ead743",
"size": "671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90980"
}
],
"symlink_target": ""
} |
from PyQt4 import QtCore, QtGui
from resources import Resources
class Systray(QtGui.QSystemTrayIcon):
urgent = False
def __init__(self, window):
super(Systray, self).__init__(QtGui.QIcon.fromTheme("scudcloud"), window)
self.connect(self, QtCore.SIGNAL("activated(QSystemTrayIcon::ActivationReason)"), self.activatedEvent)
self.window = window
self.setToolTip(Resources.APP_NAME)
self.menu = QtGui.QMenu(self.window)
self.menu.addAction('Show', self.restore)
self.menu.addSeparator()
self.menu.addAction(self.window.menus["file"]["preferences"])
self.menu.addAction(self.window.menus["help"]["about"])
self.menu.addSeparator()
self.menu.addAction(self.window.menus["file"]["exit"])
self.setContextMenu(self.menu)
def alert(self):
if not self.urgent:
self.urgent = True
self.setIcon(QtGui.QIcon.fromTheme("scudcloud-attention"))
def stopAlert(self):
self.urgent = False
self.setIcon(QtGui.QIcon.fromTheme("scudcloud"))
def setCounter(self, i):
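        # Theme icons scudcloud-attention-1 through -9 reflect the unread count;
        # ten or more falls back to scudcloud-attention-9-plus, and a count of
        # zero restores the base (or plain attention) icon.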
if 0 == i:
            if self.urgent:
self.setIcon(QtGui.QIcon.fromTheme("scudcloud-attention"))
else:
self.setIcon(QtGui.QIcon.fromTheme("scudcloud"))
elif i > 0 and i < 10:
self.setIcon(QtGui.QIcon.fromTheme("scudcloud-attention-"+str(i)))
elif i > 9:
self.setIcon(QtGui.QIcon.fromTheme("scudcloud-attention-9-plus"))
def restore(self):
self.window.show()
self.stopAlert()
def activatedEvent(self, reason):
if reason in [QtGui.QSystemTrayIcon.MiddleClick, QtGui.QSystemTrayIcon.Trigger]:
if self.window.isHidden() or self.window.isMinimized() or not self.window.isActiveWindow():
self.restore()
else:
self.window.hide()
| {
"content_hash": "f7d448b707c5767e485bc1d39a872406",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 110,
"avg_line_length": 37.745098039215684,
"alnum_prop": 0.623896103896104,
"repo_name": "aikikode/scudcloud",
"id": "1cc38bf2358d45b0d1d012945e5bc5b6a5fa4b20",
"size": "1925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scudcloud-1.0/lib/systray.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "60"
},
{
"name": "HTML",
"bytes": "784"
},
{
"name": "JavaScript",
"bytes": "3860"
},
{
"name": "Makefile",
"bytes": "114"
},
{
"name": "Python",
"bytes": "44575"
},
{
"name": "Shell",
"bytes": "2906"
}
],
"symlink_target": ""
} |
import os
import re
# ccbb libraries
from ccbbucsd.utilities.analysis_run_prefixes import get_timestamp, get_run_prefix
from ccbbucsd.utilities.notebook_runner import execute_notebook, export_notebook_to_html
__author__ = "Amanda Birmingham"
__maintainer__ = "Amanda Birmingham"
__email__ = "[email protected]"
__status__ = "development"
DATASET_NAME_KEY = "g_dataset_name"
ALG_NAME_KEY = "g_count_alg_name"
def execute_run(possible_actions_dict, run_params, ordered_run_steps, parent_dir, run_folder=None):
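    # Builds a timestamped run prefix from the dataset/algorithm params, creates
    # (or reuses) the run directory plus its methods/ subdirectory, then executes
    # each requested notebook step in order.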
timestamp = get_timestamp()
run_prefix = _generate_run_prefix(run_params, timestamp)
if run_folder is None:
run_dir = _make_run_dir(parent_dir, run_prefix)
else:
run_dir = os.path.join(parent_dir, run_folder)
methods_dir = _create_run_and_methods_dirs(run_dir)
for run_action in ordered_run_steps:
step_details = possible_actions_dict[run_action]
run_and_output_notebook(step_details, run_params, timestamp, run_prefix, run_dir, methods_dir)
def run_and_output_notebook(step_settings, params_dict, timestamp, run_prefix, run_dir, methods_dir):
run_path = step_settings[0]
base_notebook_filename = step_settings[1]
formatted_params_dict = _format_parameters(run_dir, timestamp, run_prefix, params_dict)
notebook_out_fp = _get_output_fp(base_notebook_filename, timestamp, methods_dir, ".ipynb")
execute_notebook(base_notebook_filename, notebook_out_fp, formatted_params_dict, run_path)
export_notebook_to_html(notebook_out_fp, methods_dir)
def _generate_run_prefix(run_params, timestamp):
dataset_name = run_params[DATASET_NAME_KEY]
alg_name = run_params[ALG_NAME_KEY]
return get_run_prefix(dataset_name, alg_name, timestamp)
def _make_run_dir(parent_path, run_prefix):
return os.path.join(parent_path, run_prefix)
def _create_run_and_methods_dirs(run_dir_name):
methods_dir_name = os.path.join(run_dir_name, _get_methods_folder_name())
# makedirs differs from mkdir in that it will make intermediate directories that don't already exist--
# in this case, it will make the run dir that is the parent of the methods dir
os.makedirs(methods_dir_name, exist_ok=True) # True = is OK if path already exists
return methods_dir_name
def _get_methods_folder_name():
return "methods"
def _mangle_notebook_name(timestamp, notebook_filename):
delimiter = "_"
name_base, _ = os.path.splitext(notebook_filename)
lower_base = name_base.lower()
    delimited_notebook_base = re.sub(r"\s+", delimiter, lower_base)
new_base = "{0}{1}{2}".format(timestamp, delimiter, delimited_notebook_base)
return new_base
def _get_output_fp(notebook_fp, timestamp, methods_dir, output_ext):
_, notebook_name = os.path.split(notebook_fp)
new_base = _mangle_notebook_name(timestamp, notebook_name)
new_fp = os.path.join(methods_dir, new_base + output_ext)
return new_fp
def _format_parameters(output_dir, timestamp, run_prefix, params_dict):
result = {}
for curr_param_key, curr_param_item in params_dict.items():
if hasattr(curr_param_item, 'format'): # if this item has a format method
final_item = curr_param_item.format(run_dir=output_dir, timestamp=timestamp, run_prefix=run_prefix)
else:
final_item = curr_param_item
result[curr_param_key] = final_item
return result
| {
"content_hash": "df434bdf39c0b759e8f818bf9f1b03a5",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 111,
"avg_line_length": 37.68888888888889,
"alnum_prop": 0.7037146226415094,
"repo_name": "ucsd-ccbb/jupyter-genomics",
"id": "a1670575b03a25cc5e89bfcaf08724a2df052c89",
"size": "3413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/crispr/ccbbucsd/utilities/notebook_pipeliner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "254329"
},
{
"name": "Java",
"bytes": "278021"
},
{
"name": "Jupyter Notebook",
"bytes": "19771596"
},
{
"name": "Perl",
"bytes": "14052"
},
{
"name": "Python",
"bytes": "428899"
},
{
"name": "R",
"bytes": "6817"
},
{
"name": "Shell",
"bytes": "37476"
}
],
"symlink_target": ""
} |
import io
import logging
import os
import pathlib
import pstats
import tempfile
import unittest
from unittest import mock
import openhtf
from openhtf import plugs
from openhtf.core import base_plugs
from openhtf.util import test
from openhtf.util import validators
class DummyError(Exception):
"""Raised for testing phases that raise."""
class MyPlug(base_plugs.BasePlug):
"""Stub plug for ensuring plugs get mocked correctly."""
def __init__(self):
raise NotImplementedError('MyPlug not mocked correctly')
def do_stuff(self, unused):
raise NotImplementedError('MyPlug not mocked correctly')
class ShamelessPlug(base_plugs.BasePlug):
"""Shamelessly plugs itself."""
def plug_away(self):
logging.info('%s is best plug.', self.__class__.__name__)
class ShamelessPlugStub(base_plugs.BasePlug):
"""Stub/fake implementation for ShamelessPlug."""
plug_away_call_counts: int
def __init__(self):
super().__init__()
self.plug_away_call_counts = 0
def plug_away(self):
self.plug_away_call_counts += 1
_DO_STUFF_RETVAL = 0xBEEF
@plugs.plug(my_plug=MyPlug, shameless_plug=ShamelessPlug)
@openhtf.measures('test_measurement', 'othr_measurement')
@openhtf.measures('passes', validators=[validators.in_range(1, 10)])
@openhtf.measures('fails', validators=[validators.in_range(1, 10)])
@openhtf.measures('unset_measurement')
def test_phase(phase_data, my_plug, shameless_plug: ShamelessPlug):
shameless_plug.plug_away()
phase_data.logger.error('in phase_data %s', id(phase_data))
phase_data.logger.error('in measurements %s', id(phase_data.measurements))
phase_data.measurements.test_measurement = my_plug.do_stuff('stuff_args')
phase_data.measurements.othr_measurement = 0xDEAD
phase_data.measurements.passes = 5
phase_data.measurements.fails = 20
phase_data.test_record.add_outcome_details(0xBED)
@plugs.plug(shameless_plug=ShamelessPlug)
def test_phase_with_shameless_plug(phase_data, shameless_plug: ShamelessPlug):
shameless_plug.plug_away()
phase_data.logger.info('Done using plug')
def raising_phase():
raise DummyError('This Phase raises!')
def phase_retval(retval):
"""Helper function to generate a phase that returns the given retval."""
def phase():
return retval
return phase
class PatchPlugsTest(unittest.TestCase):
def test_patch_plugs_fails_for_bad_subtype(self):
class NormalUnitTest(unittest.TestCase):
@test.yields_phases
def test_bad(self):
_ = yield test_phase
with self.assertRaises(AssertionError):
case = NormalUnitTest(methodName='test_bad')
case.test_bad()
class TestTest(test.TestCase):
def test_execute_phase_or_test_phase_with_no_patched_plugs(self):
phase_record = self.execute_phase_or_test(test_phase_with_shameless_plug)
self.assertPhaseContinue(phase_record)
def test_execute_phase_or_test_test_with_no_patched_plugs(self):
test_record = self.execute_phase_or_test(
openhtf.Test(test_phase_with_shameless_plug))
self.assertTestPass(test_record)
def test_execute_phase_or_test_phase_with_patched_plugs(self):
"""Example of partial patching of plugs."""
self.auto_mock_plugs(MyPlug)
shameless_plug = ShamelessPlug()
self.plugs[ShamelessPlug] = shameless_plug
with mock.patch.object(
shameless_plug, shameless_plug.plug_away.__name__,
autospec=True) as mocked_plug_away:
phase_record = self.execute_phase_or_test(test_phase)
mocked_plug_away.assert_called_once_with()
self.assertPhaseContinue(phase_record)
def test_execute_phase_or_test_phase_with_stub_plugs(self):
"""Example using stubs/fakes for plugs."""
self.auto_mock_plugs(MyPlug)
# Tells the test executor to substitute ShamelessPlugStub for any phases
# using ShamelessPlug.
self.plugs[ShamelessPlug] = ShamelessPlugStub()
phase_record = self.execute_phase_or_test(test_phase)
self.assertEqual(self.plugs[ShamelessPlug].plug_away_call_counts, 1)
self.assertPhaseContinue(phase_record)
def _run_my_phase_in_test_asserts(self, mock_my_plug, test_record):
mock_my_plug.do_stuff.assert_called_with('stuff_args')
# The test fails because the 'fails' measurement fails.
self.assertTestFail(test_record)
self.assertTestOutcomeCode(test_record, 0xBED)
self.assertNotMeasured(test_record, 'unset_measurement')
self.assertNotMeasured(test_record.phases[-1], 'unset_measurement')
self.assertMeasured(test_record, 'test_measurement', _DO_STUFF_RETVAL)
self.assertMeasured(test_record, 'othr_measurement', 0xDEAD)
self.assertMeasurementPass(test_record, 'passes')
self.assertMeasurementFail(test_record, 'fails')
def test_execute_phase_or_test_test_with_patched_plugs(self):
self.auto_mock_plugs(MyPlug)
self.plugs[MyPlug].do_stuff.return_value = _DO_STUFF_RETVAL
shameless_plug = ShamelessPlug()
self.plugs[ShamelessPlug] = shameless_plug
with mock.patch.object(
shameless_plug, shameless_plug.plug_away.__name__,
autospec=True) as mocked_plug_away:
test_record = self.execute_phase_or_test(openhtf.Test(test_phase))
mocked_plug_away.assert_called_once_with()
self._run_my_phase_in_test_asserts(self.plugs[MyPlug], test_record)
@test.yields_phases
def test_phase_retvals(self):
phase_record = yield phase_retval(openhtf.PhaseResult.CONTINUE)
self.assertPhaseContinue(phase_record)
phase_record = yield phase_retval(openhtf.PhaseResult.REPEAT)
self.assertPhaseRepeat(phase_record)
phase_record = yield phase_retval(openhtf.PhaseResult.STOP)
self.assertPhaseStop(phase_record)
@test.patch_plugs(mock_plug='.'.join((MyPlug.__module__, MyPlug.__name__)))
def test_patch_plugs_phase(self, mock_plug):
mock_plug.do_stuff.return_value = _DO_STUFF_RETVAL
phase_record = yield test_phase
mock_plug.do_stuff.assert_called_with('stuff_args')
self.assertIs(self.plugs[MyPlug], mock_plug)
self.assertIsInstance(self.plugs[ShamelessPlug], ShamelessPlug)
self.assertPhaseContinue(phase_record)
self.assertEqual('test_phase', phase_record.name)
self.assertMeasured(phase_record, 'test_measurement', _DO_STUFF_RETVAL)
self.assertMeasured(phase_record, 'othr_measurement', 0xDEAD)
self.assertMeasurementPass(phase_record, 'passes')
self.assertMeasurementFail(phase_record, 'fails')
@test.patch_plugs(mock_plug='.'.join((MyPlug.__module__, MyPlug.__name__)))
def test_patch_plugs_test(self, mock_plug):
mock_plug.do_stuff.return_value = _DO_STUFF_RETVAL
test_record = yield openhtf.Test(phase_retval(None), test_phase)
self._run_my_phase_in_test_asserts(mock_plug, test_record)
@unittest.expectedFailure
@test.yields_phases
def test_strict_measurement(self):
phase_record = yield phase_retval(None)
self.assertNotMeasured(phase_record, 'unset_measurement')
@unittest.expectedFailure
@test.yields_phases
def test_wrong_measured_value(self):
test_rec = yield openhtf.Test(phase_retval(None))
self.assertMeasured(test_rec, 'test_measurement', 0xBAD)
@test.yields_phases
def test_passing_test(self):
test_record = yield openhtf.Test(phase_retval(None))
self.assertTestPass(test_record)
@test.yields_phases
def test_errors(self):
phase_record = yield raising_phase
self.assertPhaseError(phase_record, DummyError)
test_record = yield openhtf.Test(raising_phase)
self.assertTestError(test_record, DummyError)
def test_bad_assert(self):
with self.assertRaises(test.InvalidTestError): # pylint: disable=g-error-prone-assert-raises
self.assertMeasured(None)
def test_doesnt_yield(self):
def doesnt_yield(cls_self): # pylint: disable=unused-argument
pass
with self.assertRaises(test.InvalidTestError):
test.yields_phases(doesnt_yield)(self)
def test_bad_mock_plug_args(self):
# Stub test* method that one might wrap with test.patch_plugs().
def stub_test_method(cls_self, plug_one, plug_two): # pylint: disable=unused-argument
pass
# Test that we catch weird extra test method args.
with self.assertRaises(test.InvalidTestError):
test.patch_plugs(plug_one='unused')(stub_test_method)
# Test that we catch mocks that aren't expected.
with self.assertRaises(test.InvalidTestError):
test.patch_plugs(
plug_one='unused', plug_two='unused', plug_three='unused')(
stub_test_method)
# Test that we catch weird plug specifications.
with self.assertRaises(ValueError):
test.patch_plugs(
plug_one='bad_spec_no_dots', plug_two='unused')(
stub_test_method)
with self.assertRaises(KeyError):
test.patch_plugs(
plug_one='bad.spec.invalid.module', plug_two='also.bad')(
stub_test_method)
def test_bad_yield(self):
def bad_test(cls_self): # pylint: disable=unused-argument
yield None
# The InvalidTestError gets raised upon initial invocation of the test
# method, so we need to do the wrapping inside the assertRaises context
# rather than using the decorator on this test method itself and only
# wrapping the yield statement in the assertRaises context.
with self.assertRaises(test.InvalidTestError):
test.yields_phases(bad_test)(self)
class PhaseProfilingTest(test.TestCase):
"""Test profiling an OpenHTF phase in unit testing.
Do this in its own fixture to avoid noise from other test methods.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._profile_tempdir = tempfile.TemporaryDirectory()
cls.set_profile_dir(pathlib.Path(cls._profile_tempdir.name))
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._profile_tempdir.cleanup()
def test_profile_phase(self):
self.execute_phase_or_test(test_phase_with_shameless_plug)
with io.StringIO() as output_stream:
stats = pstats.Stats(
str(self.get_profile_filepath()), stream=output_stream)
stats.print_stats(test_phase_with_shameless_plug.name)
output_stream.seek(0)
output = output_stream.read()
self.assertIn(
test_phase_with_shameless_plug.func.__module__.replace(
'.', os.path.sep), output)
class TestProfilingTest(test.TestCase):
"""Test profiling an OpenHTF test in unit testing.
Do this in its own fixture to avoid noise from other test methods.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._profile_tempdir = tempfile.TemporaryDirectory()
cls.set_profile_dir(pathlib.Path(cls._profile_tempdir.name))
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._profile_tempdir.cleanup()
def test_profile_test(self):
self.execute_phase_or_test(openhtf.Test(test_phase_with_shameless_plug))
with io.StringIO() as output_stream:
stats = pstats.Stats(
str(self.get_profile_filepath()), stream=output_stream)
stats.print_stats(test_phase_with_shameless_plug.name)
output_stream.seek(0)
output = output_stream.read()
self.assertIn(
test_phase_with_shameless_plug.func.__module__.replace(
'.', os.path.sep), output)
| {
"content_hash": "3a5be7738ac8e3bb8471d91339a8cca1",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 97,
"avg_line_length": 34.495384615384616,
"alnum_prop": 0.7149228436357149,
"repo_name": "google/openhtf",
"id": "af139999a639da5a5d5ab0f9b1636f5eeda9a3b3",
"size": "11805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/util/test_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "24871"
},
{
"name": "JavaScript",
"bytes": "11873"
},
{
"name": "Python",
"bytes": "1266905"
},
{
"name": "SCSS",
"bytes": "29020"
},
{
"name": "TypeScript",
"bytes": "154488"
}
],
"symlink_target": ""
} |
import tulip
messages = tulip.DataBuffer()
messages.feed_data('a message')
@tulip.task
def print_messages():
while True:
print((yield from messages.read()))
print_task = print_messages()
loop = tulip.get_event_loop()
loop.call_later(1, print_task.cancel)
loop.call_later(2, messages.feed_eof)
loop.call_later(3, loop.stop)
loop.run_forever()
| {
"content_hash": "b89074ae6e44f4d998db0f0b7c07537f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 43,
"avg_line_length": 21.058823529411764,
"alnum_prop": 0.7122905027932961,
"repo_name": "bslatkin/pycon2014",
"id": "2e1e39db822090c22865ebe65a91360e2ae902bc",
"size": "358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/asyncio-0.4.1/aymeric.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "38501"
},
{
"name": "Python",
"bytes": "813564"
}
],
"symlink_target": ""
} |
""" Tasks for running a command in a subprocess
Command - run a command with optional environment variables
SalesforceCommand - run a command with credentials passed
SalesforceBrowserTest - a task designed to wrap browser testing that could
run locally or remotely
"""
import json
import os
import subprocess
import sys
from cumulusci.core.exceptions import CommandException
from cumulusci.core.exceptions import BrowserTestFailure
from cumulusci.core.tasks import BaseTask
from cumulusci.core.utils import process_bool_arg
class Command(BaseTask):
""" Execute a shell command in a subprocess """
task_options = {
'command': {
'description': 'The command to execute',
'required': True,
},
'dir': {
'description': 'If provided, the directory where the command '
'should be run from.',
},
'env': {
'description': 'Environment variables to set for command. Must '
'be flat dict, either as python dict from YAML or '
'as JSON string.',
},
'pass_env': {
'description': 'If False, the current environment variables '
'will not be passed to the child process. '
'Defaults to True',
'required': True,
},
'interactive': {
'description': 'If True, the command will use stderr, stdout, '
                           'and stdin of the main process. '
'Defaults to False.',
},
}
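    # The 'env' option may arrive either as a mapping already parsed from YAML,
    # e.g. {'LOG_LEVEL': 'debug'}, or as a JSON string such as
    # '{"LOG_LEVEL": "debug"}'; _init_options below normalizes both into a dict.
    # (LOG_LEVEL is an illustrative name, not taken from this module.)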
def _init_options(self, kwargs):
super(Command, self)._init_options(kwargs)
if 'pass_env' not in self.options:
self.options['pass_env'] = True
if 'dir' not in self.options or not self.options['dir']:
self.options['dir'] = '.'
if 'interactive' not in self.options:
self.options['interactive'] = False
if 'env' not in self.options:
self.options['env'] = {}
else:
try:
self.options['env'] = json.loads(self.options['env'])
except TypeError:
# assume env is already dict
pass
def _run_task(self):
env = self._get_env()
self._run_command(env)
def _get_env(self):
if process_bool_arg(self.options['pass_env']):
env = os.environ.copy()
else:
env = {}
env.update(self.options['env'])
return env
def _process_output(self, line):
self.logger.info(line.rstrip())
def _handle_returncode(self, returncode, stderr):
if returncode:
message = 'Return code: {}\nstderr: {}'.format(
returncode,
stderr,
)
self.logger.error(message)
raise CommandException(message)
def _run_command(self, env, command=None, output_handler=None, return_code_handler=None):
if not command:
command = self.options['command']
interactive_mode = process_bool_arg(self.options['interactive'])
self.logger.info('Running command: %s', command)
p = subprocess.Popen(
command,
stdout=sys.stdout if interactive_mode else subprocess.PIPE,
stderr=sys.stderr if interactive_mode else subprocess.PIPE,
stdin=sys.stdin if interactive_mode else subprocess.PIPE,
bufsize=1,
shell=True,
env=env,
cwd=self.options.get('dir'),
)
if not interactive_mode:
# Handle output lines
if not output_handler:
output_handler = self._process_output
for line in iter(p.stdout.readline, ''):
output_handler(line)
p.stdout.close()
p.wait()
# Handle return code
if not return_code_handler:
return_code_handler = self._handle_returncode
return_code_handler(p.returncode, p.stderr)
class SalesforceCommand(Command):
""" Execute a Command with SF credentials provided on the environment.
Provides:
* SF_INSTANCE_URL
* SF_ACCESS_TOKEN
"""
salesforce_task = True
def _update_credentials(self):
self.org_config.refresh_oauth_token(self.project_config.keychain)
def _get_env(self):
env = super(SalesforceCommand, self)._get_env()
env['SF_ACCESS_TOKEN'] = self.org_config.access_token
env['SF_INSTANCE_URL'] = self.org_config.instance_url
return env
task_options = Command.task_options.copy()
task_options['extra'] = {
'description': 'If provided, will be appended to the end of the '
'command. Use to pass extra args to the command.',
'required': False,
}
task_options['use_saucelabs'] = {
'description': 'If True, use SauceLabs to run the tests. The '
'SauceLabs credentials will be fetched from the '
'saucelabs service in the keychain and passed as '
'environment variables to the command. Defaults to '
'False to run tests in the local browser.',
'required': True,
}
class SalesforceBrowserTest(SalesforceCommand):
""" Execute a Browser Test command locally or on SauceLabs """
task_options = task_options
def _init_options(self, kwargs):
super(SalesforceBrowserTest, self)._init_options(kwargs)
if (
'use_saucelabs' not in self.options or
self.options['use_saucelabs'] == 'False'
):
self.options['use_saucelabs'] = False
if 'extra' in self.options and self.options['extra']:
self.options['command'] = '{command} {extra}'.format(
**self.options
)
def _get_env(self):
env = super(SalesforceBrowserTest, self)._get_env()
if self.options['use_saucelabs']:
saucelabs = self.project_config.keychain.get_service('saucelabs')
env['SAUCE_NAME'] = saucelabs.username
env['SAUCE_KEY'] = saucelabs.api_key
env['RUN_ON_SAUCE'] = 'True'
else:
env['RUN_LOCAL'] = 'True'
return env
def _handle_returncode(self, returncode, stderr):
if returncode == 1:
message = 'Return code: {}\nstderr: {}'.format(
returncode,
stderr,
)
raise BrowserTestFailure(message)
elif returncode:
super(SalesforceBrowserTest, self)._handle_returncode(returncode, stderr)
| {
"content_hash": "351e5f61e6336d8b2844b6913027a3cb",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 93,
"avg_line_length": 34.41237113402062,
"alnum_prop": 0.5681545835829839,
"repo_name": "e02d96ec16/CumulusCI",
"id": "ff85b2168c257a8843d3eb2290e496b2227ab493",
"size": "6676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cumulusci/tasks/command.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2303"
},
{
"name": "Python",
"bytes": "641697"
},
{
"name": "RobotFramework",
"bytes": "9270"
},
{
"name": "Shell",
"bytes": "5555"
}
],
"symlink_target": ""
} |
from socketserver import ThreadingMixIn
from http.server import SimpleHTTPRequestHandler, HTTPServer
class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
pass
def get_input():
import argparse
parser = argparse.ArgumentParser(description='test http server')
parser.add_argument('--host', '-ho', default='localhost',
help='using host')
parser.add_argument('--port', '-p', type=int, default=8000,
help='using port')
return parser.parse_args()
def main():
args = get_input()
server = ThreadingSimpleServer((args.host, args.port), SimpleHTTPRequestHandler)
try:
print('serving at', args.host, ':', args.port,)
while True:
server.handle_request()
except KeyboardInterrupt:
print("Finished")
if __name__ == '__main__':
main()
| {
"content_hash": "8e25657c49944a5a9fa07432f624d827",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 84,
"avg_line_length": 27.70967741935484,
"alnum_prop": 0.6344586728754366,
"repo_name": "umyuu/Sample",
"id": "dcb5c25761f49ffd45b12f93d358adec37cc842e",
"size": "883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/html/template/httpserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2427"
},
{
"name": "CSS",
"bytes": "60"
},
{
"name": "HTML",
"bytes": "14520"
},
{
"name": "Java",
"bytes": "54389"
},
{
"name": "JavaScript",
"bytes": "9836"
},
{
"name": "Python",
"bytes": "134026"
},
{
"name": "Visual Basic",
"bytes": "2043"
}
],
"symlink_target": ""
} |
"""Tests for the parsers manager."""
import unittest
from plaso.parsers import interface
from plaso.parsers import manager
from plaso.parsers import plugins
from tests import test_lib as shared_test_lib
class TestParser(interface.BaseParser):
"""Test parser."""
NAME = u'test_parser'
DESCRIPTION = u'Test parser.'
# pylint: disable=unused-argument
def Parse(self, parser_mediator, **kwargs):
"""Parses the file entry and extracts event objects.
Args:
parser_mediator: a parser mediator object (instance of ParserMediator).
"""
return
class TestParserWithPlugins(interface.BaseParser):
"""Test parser with plugins."""
NAME = u'test_parser_with_plugins'
DESCRIPTION = u'Test parser with plugins.'
_plugin_classes = {}
# pylint: disable=unused-argument
def Parse(self, parser_mediator, **kwargs):
"""Parses the file entry and extracts event objects.
Args:
parser_mediator: a parser mediator object (instance of ParserMediator).
"""
return
class TestPlugin(plugins.BasePlugin):
"""Test plugin."""
NAME = u'test_plugin'
DESCRIPTION = u'Test plugin.'
# pylint: disable=unused-argument
def Process(self, parser_mediator, **kwargs):
"""Evaluates if this is the correct plugin and processes data accordingly.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
kwargs: Depending on the plugin they may require different sets of
arguments to be able to evaluate whether or not this is
the correct plugin.
Raises:
ValueError: When there are unused keyword arguments.
"""
return
class ParsersManagerTest(shared_test_lib.BaseTestCase):
"""Tests for the parsers manager."""
# pylint: disable=protected-access
def testGetParserFilters(self):
"""Tests the _GetParserFilters function."""
parser_filter_expression = u''
includes, excludes = manager.ParsersManager._GetParserFilters(
parser_filter_expression)
self.assertEqual(includes, {})
self.assertEqual(excludes, {})
parser_filter_expression = u'test_include,!test_exclude'
includes, excludes = manager.ParsersManager._GetParserFilters(
parser_filter_expression)
self.assertEqual(includes, {u'test_include': []})
self.assertEqual(excludes, {})
parser_filter_expression = (
u'test_include,test_intersection,!test_exclude,!test_intersection')
includes, excludes = manager.ParsersManager._GetParserFilters(
parser_filter_expression)
self.assertEqual(includes, {u'test_include': []})
self.assertEqual(excludes, {})
parser_filter_expression = u'test/include,!test/exclude'
includes, excludes = manager.ParsersManager._GetParserFilters(
parser_filter_expression)
self.assertEqual(includes, {u'test': [u'include']})
self.assertEqual(excludes, {u'test': [u'exclude']})
parser_filter_expression = (
u'test/include,test/intersection,!test/exclude,!test/intersection')
includes, excludes = manager.ParsersManager._GetParserFilters(
parser_filter_expression)
self.assertEqual(includes, {u'test': [u'include']})
self.assertEqual(excludes, {u'test': [u'exclude', u'intersection']})
def testGetParsersFromPresetCategory(self):
"""Tests the _GetParsersFromPresetCategory function."""
expected_parser_names = sorted([
u'bencode', u'esedb', u'filestat', u'sqlite/google_drive', u'java_idx',
u'lnk', u'mcafee_protection', u'olecf', u'openxml', u'pe', u'prefetch',
u'sccm', u'skydrive_log', u'skydrive_log_old', u'sqlite/skype',
u'symantec_scanlog', u'binary_cookies', u'chrome_cache',
u'sqlite/chrome_cookies', u'sqlite/chrome_extension_activity',
u'sqlite/chrome_history', u'chrome_preferences', u'firefox_cache',
u'sqlite/firefox_cookies', u'sqlite/firefox_downloads',
u'sqlite/firefox_history', u'java_idx', u'esedb/msie_webcache',
u'msiecf', u'opera_global', u'opera_typed_history',
u'plist/safari_history', u'winfirewall', u'winjob', u'winreg'])
parser_names = manager.ParsersManager._GetParsersFromPresetCategory(
u'win_gen')
self.assertEqual(sorted(parser_names), expected_parser_names)
parser_names = manager.ParsersManager._GetParsersFromPresetCategory(
u'bogus')
self.assertEqual(parser_names, [])
def testReduceParserFilters(self):
"""Tests the ReduceParserFilters function."""
includes = {}
excludes = {}
manager.ParsersManager._ReduceParserFilters(includes, excludes)
self.assertEqual(includes, {})
self.assertEqual(excludes, {})
includes = {u'test_include': u''}
excludes = {u'test_exclude': u''}
manager.ParsersManager._ReduceParserFilters(includes, excludes)
self.assertEqual(includes, {u'test_include': u''})
self.assertEqual(excludes, {})
includes = {u'test_include': u'', u'test_intersection': u''}
excludes = {u'test_exclude': u'', u'test_intersection': u''}
manager.ParsersManager._ReduceParserFilters(includes, excludes)
self.assertEqual(includes, {u'test_include': u''})
self.assertEqual(excludes, {})
includes = {u'test': [u'include']}
excludes = {u'test': [u'exclude']}
manager.ParsersManager._ReduceParserFilters(includes, excludes)
self.assertEqual(includes, {u'test': [u'include']})
self.assertEqual(excludes, {u'test': [u'exclude']})
includes = {u'test': [u'include', u'intersection']}
excludes = {u'test': [u'exclude', u'intersection']}
manager.ParsersManager._ReduceParserFilters(includes, excludes)
self.assertEqual(includes, {u'test': [u'include']})
self.assertEqual(excludes, {u'test': [u'exclude', u'intersection']})
def testParserRegistration(self):
"""Tests the RegisterParser and DeregisterParser functions."""
number_of_parsers = len(manager.ParsersManager._parser_classes)
manager.ParsersManager.RegisterParser(TestParser)
self.assertEqual(
len(manager.ParsersManager._parser_classes),
number_of_parsers + 1)
with self.assertRaises(KeyError):
manager.ParsersManager.RegisterParser(TestParser)
manager.ParsersManager.DeregisterParser(TestParser)
self.assertEqual(
len(manager.ParsersManager._parser_classes),
number_of_parsers)
def testPluginRegistration(self):
"""Tests the RegisterPlugin and DeregisterPlugin functions."""
TestParserWithPlugins.RegisterPlugin(TestPlugin)
self.assertEqual(
len(TestParserWithPlugins._plugin_classes), 1)
with self.assertRaises(KeyError):
TestParserWithPlugins.RegisterPlugin(TestPlugin)
TestParserWithPlugins.DeregisterPlugin(TestPlugin)
self.assertEqual(
len(TestParserWithPlugins._plugin_classes), 0)
def testGetParserAndPluginNames(self):
"""Tests the GetParserAndPluginNames function."""
TestParserWithPlugins.RegisterPlugin(TestPlugin)
manager.ParsersManager.RegisterParser(TestParserWithPlugins)
manager.ParsersManager.RegisterParser(TestParser)
parser_names = manager.ParsersManager.GetParserAndPluginNames(
parser_filter_expression=u'test_parser')
self.assertEqual(parser_names, [u'test_parser'])
parser_names = manager.ParsersManager.GetParserAndPluginNames(
parser_filter_expression=u'!test_parser')
self.assertNotIn(u'test_parser', parser_names)
expected_parser_names = [
u'test_parser_with_plugins',
u'test_parser_with_plugins/test_plugin']
parser_names = manager.ParsersManager.GetParserAndPluginNames(
parser_filter_expression=u'test_parser_with_plugins/test_plugin')
self.assertEqual(parser_names, expected_parser_names)
# Test with a parser name, not using plugin names.
expected_parser_names = [
u'test_parser_with_plugins',
u'test_parser_with_plugins/test_plugin']
parser_names = manager.ParsersManager.GetParserAndPluginNames(
parser_filter_expression=u'test_parser_with_plugins')
self.assertEqual(parser_names, expected_parser_names)
TestParserWithPlugins.DeregisterPlugin(TestPlugin)
manager.ParsersManager.DeregisterParser(TestParserWithPlugins)
manager.ParsersManager.DeregisterParser(TestParser)
def testGetParserObjectByName(self):
"""Tests the GetParserObjectByName function."""
manager.ParsersManager.RegisterParser(TestParser)
parser_object = manager.ParsersManager.GetParserObjectByName(
u'test_parser')
self.assertIsNotNone(parser_object)
self.assertEqual(parser_object.NAME, u'test_parser')
parser_object = manager.ParsersManager.GetParserObjectByName(u'bogus')
self.assertIsNone(parser_object)
manager.ParsersManager.DeregisterParser(TestParser)
def testGetParserObjects(self):
"""Tests the GetParserObjects function."""
TestParserWithPlugins.RegisterPlugin(TestPlugin)
manager.ParsersManager.RegisterParser(TestParserWithPlugins)
manager.ParsersManager.RegisterParser(TestParser)
parser_names = []
parser_objects = manager.ParsersManager.GetParserObjects(
parser_filter_expression=u'test_parser')
for _, parser_object in iter(parser_objects.items()):
parser_names.append(parser_object.NAME)
self.assertEqual(parser_names, [u'test_parser'])
parser_names = []
parser_objects = manager.ParsersManager.GetParserObjects(
parser_filter_expression=u'!test_parser')
for _, parser_object in iter(parser_objects.items()):
parser_names.append(parser_object.NAME)
self.assertNotEqual(len(parser_names), 0)
self.assertNotIn(u'test_parser', parser_names)
parser_names = []
parser_objects = manager.ParsersManager.GetParserObjects(
parser_filter_expression=u'test_parser_with_plugins/test_plugin')
for _, parser_object in iter(parser_objects.items()):
parser_names.append(parser_object.NAME)
self.assertEqual(parser_names, [u'test_parser_with_plugins'])
# Test with a parser name, not using plugin names.
parser_names = []
parser_objects = manager.ParsersManager.GetParserObjects(
parser_filter_expression=u'test_parser_with_plugins')
for _, parser_object in iter(parser_objects.items()):
parser_names.append(parser_object.NAME)
self.assertEqual(parser_names, [u'test_parser_with_plugins'])
TestParserWithPlugins.DeregisterPlugin(TestPlugin)
manager.ParsersManager.DeregisterParser(TestParserWithPlugins)
manager.ParsersManager.DeregisterParser(TestParser)
def testGetParsers(self):
"""Tests the GetParsers function."""
TestParserWithPlugins.RegisterPlugin(TestPlugin)
manager.ParsersManager.RegisterParser(TestParserWithPlugins)
manager.ParsersManager.RegisterParser(TestParser)
parser_names = []
for _, parser_class in manager.ParsersManager.GetParsers(
parser_filter_expression=u'test_parser'):
parser_names.append(parser_class.NAME)
self.assertEqual(parser_names, [u'test_parser'])
parser_names = []
for _, parser_class in manager.ParsersManager.GetParsers(
parser_filter_expression=u'!test_parser'):
parser_names.append(parser_class.NAME)
self.assertNotEqual(len(parser_names), 0)
self.assertNotIn(u'test_parser', parser_names)
parser_names = []
for _, parser_class in manager.ParsersManager.GetParsers(
parser_filter_expression=u'test_parser_with_plugins/test_plugin'):
parser_names.append(parser_class.NAME)
self.assertEqual(parser_names, [u'test_parser_with_plugins'])
# Test with a parser name, not using plugin names.
parser_names = []
for _, parser_class in manager.ParsersManager.GetParsers(
parser_filter_expression=u'test_parser_with_plugins'):
parser_names.append(parser_class.NAME)
self.assertEqual(parser_names, [u'test_parser_with_plugins'])
TestParserWithPlugins.DeregisterPlugin(TestPlugin)
manager.ParsersManager.DeregisterParser(TestParserWithPlugins)
manager.ParsersManager.DeregisterParser(TestParser)
def testGetPluginObjectByName(self):
"""Tests the GetPluginObjectByName function."""
TestParserWithPlugins.RegisterPlugin(TestPlugin)
plugin_object = TestParserWithPlugins.GetPluginObjectByName(u'test_plugin')
self.assertIsNotNone(plugin_object)
plugin_object = TestParserWithPlugins.GetPluginObjectByName(u'bogus')
self.assertIsNone(plugin_object)
TestParserWithPlugins.DeregisterPlugin(TestPlugin)
def testGetPlugins(self):
"""Tests the GetPlugins function."""
TestParserWithPlugins.RegisterPlugin(TestPlugin)
generator = TestParserWithPlugins.GetPlugins()
plugin_tuples = list(generator)
self.assertNotEqual(len(plugin_tuples), 0)
self.assertIsNotNone(plugin_tuples[0])
TestParserWithPlugins.DeregisterPlugin(TestPlugin)
# TODO: add GetParsersInformation test.
# TODO: add GetNamesOfParsersWithPlugins test.
# TODO: add GetScanner test.
# TODO: add GetSpecificationStore test.
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "60119b913f236e508cddd56a639ac389",
"timestamp": "",
"source": "github",
"line_count": 349,
"max_line_length": 79,
"avg_line_length": 37.3810888252149,
"alnum_prop": 0.7205273647094895,
"repo_name": "dc3-plaso/plaso",
"id": "552f42670f1cb8d2e52ed45b0765954030a5eb13",
"size": "13088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/parsers/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1683"
},
{
"name": "Makefile",
"bytes": "1151"
},
{
"name": "Python",
"bytes": "3875098"
},
{
"name": "Shell",
"bytes": "17861"
}
],
"symlink_target": ""
} |
import math
from os.path import basename
# Note: despite its name, this returns the magnitude (Euclidean norm) of a
# 2D vector given its x and y components, not a dot product.
def dotproduct(x, y):
    return math.sqrt(x**2 + y**2)
# This is used to convert the meters per second velocity into a miles per hour value.
def tomph(velocity):
return velocity * 2.24
'''
This function is used to convert the units for increment traveled from meters to feet; because we measure over each second,
a more human readable unit is needed.
'''
def meterstofeet(args):
return args * 3.28
def getDriver(dirName):
return (basename(dirName))
def getFileBaseName(baseName):
    return baseName.split('.')[0]
def getTrip(baseName):
return int(getFileBaseName(baseName))
def isFloat(value):
    try:
        float(value)
        return True
    except (ValueError, TypeError):
        return False
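# --- Illustrative usage sketch (added for clarity; not in the original
# module). The component values below are made up and only show how the
# helpers above combine.
def exampleSpeedMph(vx=3.0, vy=4.0):
    return tomph(dotproduct(vx, vy))  # sqrt(3^2 + 4^2) = 5.0 m/s -> 11.2 mph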
| {
"content_hash": "c4204bd3938368ea644f1818ca01650d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 124,
"avg_line_length": 19.75,
"alnum_prop": 0.679324894514768,
"repo_name": "georgetown-analytics/skidmarks",
"id": "453b6fe895d881f4faf3e3a65f1e878c0e214cf6",
"size": "1776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6709"
},
{
"name": "Makefile",
"bytes": "6778"
},
{
"name": "Python",
"bytes": "128849"
}
],
"symlink_target": ""
} |
from unittest import main
from qiita_pet.test.tornado_test_base import TestHandlerBase
from qiita_db.handlers.tests.oauthbase import OauthTestingBase
class StudyIndexHandlerTests(TestHandlerBase):
def test_get_exists(self):
response = self.get('/study/description/1')
self.assertEqual(response.code, 200)
def test_get_no_exists(self):
response = self.get('/study/description/245')
self.assertEqual(response.code, 404)
class StudyBaseInfoAJAX(TestHandlerBase):
# TODO: Missing tests
pass
class DataTypesMenuAJAXTests(TestHandlerBase):
def test_get(self):
response = self.get('/study/description/data_type_menu/',
{'study_id': '1'})
self.assertEqual(response.code, 200)
self.assertNotEqual(response.body, "")
def test_get_no_exists(self):
response = self.get('/study/description/data_type_menu/',
{'study_id': '245'})
self.assertEqual(response.code, 404)
class StudyFilesAJAXTests(TestHandlerBase):
def test_get(self):
args = {'study_id': 1, 'artifact_type': 'FASTQ', 'prep_template_id': 1}
response = self.get('/study/files/', args)
self.assertEqual(response.code, 200)
self.assertNotEqual(response.body, "")
class TestStudyGetTags(TestHandlerBase):
def test_get(self):
response = self.get('/study/get_tags/')
exp = ('{"status": "success", "message": "", "tags": '
'{"admin": [], "user": []}}')
self.assertEqual(response.code, 200)
self.assertEqual(response.body, exp)
class TestStudyTags(OauthTestingBase):
def test_get(self):
response = self.get('/study/tags/1')
exp = ('{"status": "success", "message": "", "tags": []}')
self.assertEqual(response.code, 200)
self.assertEqual(response.body, exp)
# test error
response = self.get('/study/tags/bla')
self.assertEqual(response.code, 400)
def test_patch(self):
arguments = {'op': 'replace', 'path': '/tags',
'value[]': "['testA', 'testB']"}
obs = self.patch('/study/tags/1', headers=self.header, data=arguments)
self.assertEqual(obs.code, 200)
self.assertEqual(obs.body, '{"status": "success", "message": ""}')
# checking the tags were added
response = self.get('/study/tags/1')
exp = ('{"status": "success", "message": "", "tags": '
'["[\'testA\', \'testB\']"]}')
self.assertEqual(response.code, 200)
self.assertEqual(response.body, exp)
arguments = {'op': 'replace', 'path': '/tags',
'value[]': "['testA', 'testB']"}
obs = self.patch('/study/tags/b', headers=self.header, data=arguments)
self.assertEqual(obs.code, 400)
if __name__ == "__main__":
main()
| {
"content_hash": "48de3245da6b235e7fcd202784199efd",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 79,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.591886269070735,
"repo_name": "josenavas/QiiTa",
"id": "d8e60f423d69326837690383e332c8dd82e58022",
"size": "3234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiita_pet/handlers/study_handlers/tests/test_base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1721"
},
{
"name": "HTML",
"bytes": "559042"
},
{
"name": "JavaScript",
"bytes": "81276"
},
{
"name": "Makefile",
"bytes": "6838"
},
{
"name": "PLpgSQL",
"bytes": "82663"
},
{
"name": "Python",
"bytes": "2294577"
},
{
"name": "SQLPL",
"bytes": "7501"
},
{
"name": "Shell",
"bytes": "3062"
}
],
"symlink_target": ""
} |
"""
===============================================
Reconstruct with Generalized Q-Sampling Imaging
===============================================
We show how to apply Generalized Q-Sampling Imaging (Yeh et al. IEEE TMI 2010)
to diffusion MRI datasets. You can think of GQI as an analytical version of
DSI orientation distribution function (ODF) (Garyfallidis, PhD thesis, 2012).
First import the necessary modules:
"""
import numpy as np
import nibabel as nib
from dipy.data import fetch_taiwan_ntu_dsi, read_taiwan_ntu_dsi, get_sphere
from dipy.align.aniso2iso import resample
from dipy.reconst.gqi import GeneralizedQSamplingModel
from dipy.reconst.odf import peaks_from_model
"""
Download and read the data for this tutorial.
"""
fetch_taiwan_ntu_dsi()
img, gtab = read_taiwan_ntu_dsi()
"""
img contains a nibabel Nifti1Image object (data) and gtab contains a GradientTable
object (gradient information e.g. b-values). For example to read the b-values
it is possible to write print(gtab.bvals).
Load the raw diffusion data and the affine.
"""
data = img.get_data()
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
data.shape ``(96, 96, 60, 203)``
This dataset has anisotropic voxel sizes, therefore reslicing is necessary.
"""
affine = img.get_affine()
"""
Read the voxel size from the image header.
"""
voxel_size = img.get_header().get_zooms()[:3]
"""
Instantiate the Model and apply it to the data.
"""
gqmodel = GeneralizedQSamplingModel(gtab, sampling_length=3)
"""
The parameter `sampling_length` sets the diffusion sampling length used by
the GQI reconstruction.
Let's use only one slice of the data.
"""
dataslice = data[:, :, data.shape[2] // 2]
mask = dataslice[..., 0] > 50
gqfit = gqmodel.fit(dataslice, mask=mask)
"""
Load an odf reconstruction sphere
"""
sphere = get_sphere('symmetric724')
"""
Calculate the ODFs with this specific sphere
"""
ODF = gqfit.odf(sphere)
print('ODF.shape (%d, %d, %d)' % ODF.shape)
"""
ODF.shape ``(96, 96, 724)``
Using peaks_from_model we can find the main peaks of the ODFs and other
properties.
"""
gqpeaks = peaks_from_model(model=gqmodel,
data=dataslice,
sphere=sphere,
relative_peak_threshold=.8,
min_separation_angle=45,
mask=mask,
return_odf=False,
normalize_peaks=True)
gqpeak_values = gqpeaks.peak_values
"""
gqpeak_indices show which sphere points have the maximum values.
"""
gqpeak_indices = gqpeaks.peak_indices
"""
It is also possible to calculate GFA.
"""
GFA = gqpeaks.gfa
print('GFA.shape (%d, %d)' % GFA.shape)
"""
With the parameter `return_odf=True` we can obtain the ODF using `gqpeaks.odf`
"""
gqpeaks = peaks_from_model(model=gqmodel,
data=dataslice,
sphere=sphere,
relative_peak_threshold=.8,
min_separation_angle=45,
mask=mask,
return_odf=True,
normalize_peaks=True)
"""
This ODF will of course be identical to the ODF calculated above as long as
the same data and mask are used.
"""
np.sum(gqpeaks.odf != ODF) == 0
"""
True
The advantage of using peaks_from_model is that it calculates the ODF only
once and either keeps it or discards it when it is no longer needed.
.. include:: ../links_names.inc
"""
| {
"content_hash": "5d419a24184733fbc30b5a6ea093603b",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 84,
"avg_line_length": 23.965034965034967,
"alnum_prop": 0.6247446746425445,
"repo_name": "maurozucchelli/dipy",
"id": "197eb89e15a66380bd64a5277e21c208ea172fac",
"size": "3427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/examples/reconst_gqi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "335"
},
{
"name": "CSS",
"bytes": "641"
},
{
"name": "Python",
"bytes": "1177807"
},
{
"name": "Shell",
"bytes": "2955"
},
{
"name": "TeX",
"bytes": "537291"
}
],
"symlink_target": ""
} |
"""This module contains test for KeyValue helpers."""
__author__ = "Krzysztof Trzepla"
__copyright__ = """(C) 2016 ACK CYFRONET AGH,
This software is released under the MIT license cited in 'LICENSE.txt'."""
from test_common import *
from common_test_base import *
import pytest
THREAD_NUMBER = 8
BLOCK_SIZE = 1024
def test_write_should_write_multiple_blocks(helper, file_id, server):
block_num = 20
seed = random_str(BLOCK_SIZE)
data = seed * block_num
assert helper.write(file_id, data, 0) == len(data)
assert helper.read(file_id, 0, len(data)) == data
assert len(server.list(file_id)) == block_num
def test_unlink_should_delete_data(helper, file_id, server):
data = random_str()
offset = random_int()
assert helper.write(file_id, data, offset) == len(data)
assert len(server.list(file_id)) > 0
helper.unlink(file_id, offset+len(data))
assert len(server.list(file_id)) == 0
def test_truncate_should_create_empty_file(helper, file_id):
for size in range(random_int(), -1, -1):
helper.truncate(file_id, size, 0)
assert helper.read(file_id, 0, size) == '\0' * size
def test_truncate_should_create_empty_multi_block_file(helper, file_id, server):
blocks_num = 10
size = blocks_num * BLOCK_SIZE
helper.truncate(file_id, size, 0)
assert helper.read(file_id, 0, size + 1) == '\0' * size + '\0'
assert len(server.list(file_id)) == 1
def test_truncate_should_pad_block(helper, file_id, server):
data = random_str()
assert helper.write(file_id, data, BLOCK_SIZE) == len(data)
assert len(server.list(file_id)) == 1
helper.truncate(file_id, BLOCK_SIZE, len(data)+BLOCK_SIZE)
assert helper.read(file_id, 0, BLOCK_SIZE + 1) == '\0' * BLOCK_SIZE + '\0'
assert helper.write(file_id, data, BLOCK_SIZE) == len(data)
def test_truncate_should_delete_all_blocks(helper, file_id, server):
blocks_num = 10
data = random_str(blocks_num * BLOCK_SIZE)
assert helper.write(file_id, data, 0) == len(data)
assert len(server.list(file_id)) == blocks_num
helper.truncate(file_id, 0, len(data))
assert helper.read(file_id, 0, len(data)) == '\0'*len(data)
assert len(server.list(file_id)) == 0
def test_write_should_overwrite_multiple_blocks_part(helper, file_id):
block_num = 10
updates_num = 100
seed = random_str(BLOCK_SIZE)
data = seed * block_num
assert helper.write(file_id, data, 0) == len(data)
for _ in range(updates_num):
offset = random_int(lower_bound=0, upper_bound=len(data))
block = random_str(BLOCK_SIZE)
data = data[:offset] + block + data[offset + len(block):]
        assert helper.write(file_id, block, offset) == len(block)
assert helper.read(file_id, 0, len(data)) == data
def test_read_should_read_multi_block_data_with_holes(helper, file_id):
data = random_str(10)
empty_block = '\0' * BLOCK_SIZE
block_num = 10
assert helper.write(file_id, data, 0) == len(data)
assert helper.write(file_id, data, block_num * BLOCK_SIZE) == len(data)
data = data + empty_block[len(data):] + (block_num - 1) * empty_block + data
assert helper.read(file_id, 0, len(data)) == data
def test_read_should_read_empty_data(helper, file_id):
offset = random_int()
size = random_int()
assert helper.read(file_id, offset, size) == '\0'*size
| {
"content_hash": "ea836697aae9b7a73c888cb2d50035e2",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 80,
"avg_line_length": 33.64,
"alnum_prop": 0.6492271105826397,
"repo_name": "onedata/helpers",
"id": "6f5737b8af2ff0c9f91612f53e769576ea314d04",
"size": "3364",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test/integration/key_value_test_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1175228"
},
{
"name": "CMake",
"bytes": "55270"
},
{
"name": "Erlang",
"bytes": "2164"
},
{
"name": "Makefile",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "159032"
}
],
"symlink_target": ""
} |
import random
originalKey = ["a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n",
"o", "p", "q", "r", "s", "t", "u",
"v", "w", "x", "y", "z"]
class Rotors(object):
    def __init__(self, *args):
        # Build this rotor's wiring: a randomly shuffled copy of the alphabet.
        self.originalDict = [i for i in originalKey]
        random.shuffle(self.originalDict)
        # The input contacts are the same wiring read in reverse order.
        self.originalKey = []
        for i in reversed(self.originalDict):
            self.originalKey.append(i)
        self.__rotorId__ = args[0]
        # self.__position__ = 0
    def encrypting(self, position):
        # Rotate the output wiring by `position` steps and return the
        # resulting input-letter to output-letter substitution table.
        unoriginalDict = self.originalDict[position:]
        unoriginalDict = unoriginalDict + self.originalDict[:position]
        encryptDict = dict(zip(self.originalKey, unoriginalDict))
        return encryptDict
    def doEncrypt(self, string, position=0):
        string = string.lower()
        encryptString = ""
        for i in string:
            try:
                encryptString += self.encrypting(position)[i]
                # Step the rotor after every encrypted character.
                position += 1
            except KeyError:
                # Characters outside a-z pass through unchanged.
                encryptString += i
        return encryptString
class Reflector(object):
def __init__(self, *args):
self.originalDict = [i for i in originalKey]
self.__RefId__ = args[0]
random.shuffle(self.originalDict)
self.originalKey = []
for i in reversed(self.originalDict):
self.originalKey.append(i)
def encrypt(self, string):
encryptDict = dict(zip(self.originalKey, self.originalDict))
encryptString = ""
for i in string:
try:
encryptString += encryptDict[i]
except KeyError:
encryptString += i
return encryptString
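# --- Illustrative usage sketch (added; not part of the original module).
# Each Rotors/Reflector instance shuffles its own wiring at construction
# time, so the output below is random per run; the arguments "I" and "B"
# are arbitrary identifiers.
if __name__ == "__main__":
    rotor = Rotors("I")
    reflector = Reflector("B")
    scrambled = rotor.doEncrypt("attack at dawn")
    print(scrambled)                     # stepped rotor substitution
    print(reflector.encrypt(scrambled))  # passed through the reflector wiring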
| {
"content_hash": "e826af9730145d51acecdc7af8237b14",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 70,
"avg_line_length": 28.112903225806452,
"alnum_prop": 0.5364314400458978,
"repo_name": "ccqpein/Enigma-machine-sample",
"id": "e153fd83a83f989e98bd6aa839b94b6778662050",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Hardware.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2747"
},
{
"name": "Shell",
"bytes": "66"
}
],
"symlink_target": ""
} |
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
It is possible to perform policy checks on the following user
attributes (obtained through the token): user_id, domain_id or
project_id::
domain_id:<some_value>
Attributes sent along with API calls can be used by the policy engine
(on the right side of the expression), by using the following syntax::
<some_value>:user.id
Contextual attributes of objects identified by their IDs are loaded
from the database. They are also available to the policy engine and
can be checked through the `target` keyword::
<some_value>:target.role.name
All these attributes (related to users, API calls, and context) can be
checked against each other or against constants, be it literals (True,
<a_number>) or strings.
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import ast
import os
import re
from oslo.config import cfg
from oslo.serialization import jsonutils
import six
import six.moves.urllib.parse as urlparse
import six.moves.urllib.request as urlrequest
from openstack_horizon.openstack.common import fileutils
from openstack_horizon.openstack.common._i18n import _, _LE, _LW
from openstack_horizon.openstack.common import log as logging
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
help=_('The JSON file that defines policies.')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Default rule. Enforced when a requested rule is not '
'found.')),
cfg.MultiStrOpt('policy_dirs',
default=['policy.d'],
                    help=_('Directories where policy configuration files '
                           'are stored.')),
]
CONF = cfg.CONF
CONF.register_opts(policy_opts)
LOG = logging.getLogger(__name__)
_checks = {}
class PolicyNotAuthorized(Exception):
def __init__(self, rule):
msg = _("Policy doesn't allow %s to be performed.") % rule
super(PolicyNotAuthorized, self).__init__(msg)
class Rules(dict):
"""A store for rules. Handles the default_rule setting directly."""
@classmethod
def load_json(cls, data, default_rule=None):
"""Allow loading of JSON rule data."""
# Suck in the JSON data and parse the rules
rules = dict((k, parse_rule(v)) for k, v in
jsonutils.loads(data).items())
return cls(rules, default_rule)
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super(Rules, self).__init__(rules or {})
self.default_rule = default_rule
def __missing__(self, key):
"""Implements the default rule handling."""
if isinstance(self.default_rule, dict):
raise KeyError(key)
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule:
raise KeyError(key)
if isinstance(self.default_rule, BaseCheck):
return self.default_rule
# We need to check this or we can get infinite recursion
if self.default_rule not in self:
raise KeyError(key)
elif isinstance(self.default_rule, six.string_types):
return self[self.default_rule]
def __str__(self):
"""Dumps a string representation of the rules."""
# Start by building the canonical strings for the rules
out_rules = {}
for key, value in self.items():
# Use empty string for singleton TrueCheck instances
if isinstance(value, TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
# Dump a pretty-printed JSON representation
return jsonutils.dumps(out_rules, indent=4)
class Enforcer(object):
"""Responsible for loading and enforcing rules.
:param policy_file: Custom policy file to use, if none is
specified, `CONF.policy_file` will be
used.
:param rules: Default dictionary / Rules to use. It will be
considered just in the first instantiation. If
`load_rules(True)`, `clear()` or `set_rules(True)`
is called this will be overwritten.
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
:param use_conf: Whether to load rules from cache or config file.
"""
def __init__(self, policy_file=None, rules=None,
default_rule=None, use_conf=True):
self.rules = Rules(rules, default_rule)
self.default_rule = default_rule or CONF.policy_default_rule
self.policy_path = None
self.policy_file = policy_file or CONF.policy_file
self.use_conf = use_conf
def set_rules(self, rules, overwrite=True, use_conf=False):
"""Create a new Rules object based on the provided dict of rules.
:param rules: New rules to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
:param use_conf: Whether to reload rules from cache or config file.
"""
if not isinstance(rules, dict):
raise TypeError(_("Rules must be an instance of dict or Rules, "
"got %s instead") % type(rules))
self.use_conf = use_conf
if overwrite:
self.rules = Rules(rules, self.default_rule)
else:
self.rules.update(rules)
def clear(self):
"""Clears Enforcer rules, policy's cache and policy's path."""
self.set_rules({})
fileutils.delete_cached_file(self.policy_path)
self.default_rule = None
self.policy_path = None
def load_rules(self, force_reload=False):
"""Loads policy_path's rules.
Policy file is cached and will be reloaded if modified.
:param force_reload: Whether to overwrite current rules.
"""
if force_reload:
self.use_conf = force_reload
if self.use_conf:
if not self.policy_path:
self.policy_path = self._get_policy_path(self.policy_file)
self._load_policy_file(self.policy_path, force_reload)
for path in CONF.policy_dirs:
try:
path = self._get_policy_path(path)
except cfg.ConfigFilesNotFoundError:
                    LOG.warn(_LW("Cannot find policy directory %s"), path)
continue
self._walk_through_policy_directory(path,
self._load_policy_file,
force_reload, False)
def _walk_through_policy_directory(self, path, func, *args):
# We do not iterate over sub-directories.
policy_files = next(os.walk(path))[2]
policy_files.sort()
for policy_file in [p for p in policy_files if not p.startswith('.')]:
func(os.path.join(path, policy_file), *args)
def _load_policy_file(self, path, force_reload, overwrite=True):
reloaded, data = fileutils.read_cached_file(
path, force_reload=force_reload)
if reloaded or not self.rules:
rules = Rules.load_json(data, self.default_rule)
self.set_rules(rules, overwrite)
LOG.debug("Rules successfully reloaded")
def _get_policy_path(self, path):
"""Locate the policy json data file/path.
        :param path: Its value can be a full path or a relative path. When a
                     full path is specified, this function just returns the
                     full path. When a relative path is specified, this
                     function will search configuration directories to find
                     one that exists.
:returns: The policy path
:raises: ConfigFilesNotFoundError if the file/path couldn't
be located.
"""
policy_path = CONF.find_file(path)
if policy_path:
return policy_path
raise cfg.ConfigFilesNotFoundError((path,))
def enforce(self, rule, target, creds, do_raise=False,
exc=None, *args, **kwargs):
"""Checks authorization of a rule against the target and credentials.
:param rule: A string or BaseCheck instance specifying the rule
to evaluate.
:param target: As much information about the object being operated
on as possible, as a dictionary.
:param creds: As much information about the user performing the
action as possible, as a dictionary.
:param do_raise: Whether to raise an exception or not if check
fails.
:param exc: Class of the exception to raise if the check fails.
Any remaining arguments passed to check() (both
positional and keyword arguments) will be passed to
the exception class. If not specified, PolicyNotAuthorized
will be used.
:return: Returns False if the policy does not allow the action and
exc is not provided; otherwise, returns a value that
evaluates to True. Note: for rules using the "case"
expression, this True value will be the specified string
from the expression.
"""
self.load_rules()
# Allow the rule to be a Check tree
if isinstance(rule, BaseCheck):
result = rule(target, creds, self)
elif not self.rules:
# No rules to reference means we're going to fail closed
result = False
else:
try:
# Evaluate the rule
result = self.rules[rule](target, creds, self)
except KeyError:
LOG.debug("Rule [%s] doesn't exist" % rule)
# If the rule doesn't exist, fail closed
result = False
# If it is False, raise the exception if requested
if do_raise and not result:
if exc:
raise exc(*args, **kwargs)
raise PolicyNotAuthorized(rule)
return result
@six.add_metaclass(abc.ABCMeta)
class BaseCheck(object):
"""Abstract base class for Check classes."""
@abc.abstractmethod
def __str__(self):
"""String representation of the Check tree rooted at this node."""
pass
@abc.abstractmethod
def __call__(self, target, cred, enforcer):
"""Triggers if instance of the class is called.
Performs the check. Returns False to reject the access or a
true value (not necessary True) to accept the access.
"""
pass
class FalseCheck(BaseCheck):
"""A policy check that always returns False (disallow)."""
def __str__(self):
"""Return a string representation of this check."""
return "!"
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return False
class TrueCheck(BaseCheck):
"""A policy check that always returns True (allow)."""
def __str__(self):
"""Return a string representation of this check."""
return "@"
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return True
class Check(BaseCheck):
"""A base class to allow for user-defined policy checks."""
def __init__(self, kind, match):
"""Initiates Check instance.
:param kind: The kind of the check, i.e., the field before the
':'.
:param match: The match of the check, i.e., the field after
the ':'.
"""
self.kind = kind
self.match = match
def __str__(self):
"""Return a string representation of this check."""
return "%s:%s" % (self.kind, self.match)
class NotCheck(BaseCheck):
"""Implements the "not" logical operator.
A policy check that inverts the result of another policy check.
"""
def __init__(self, rule):
"""Initialize the 'not' check.
:param rule: The rule to negate. Must be a Check.
"""
self.rule = rule
def __str__(self):
"""Return a string representation of this check."""
return "not %s" % self.rule
def __call__(self, target, cred, enforcer):
"""Check the policy.
Returns the logical inverse of the wrapped check.
"""
return not self.rule(target, cred, enforcer)
class AndCheck(BaseCheck):
"""Implements the "and" logical operator.
A policy check that requires that a list of other checks all return True.
"""
def __init__(self, rules):
"""Initialize the 'and' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' and '.join(str(r) for r in self.rules)
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that all rules accept in order to return True.
"""
for rule in self.rules:
if not rule(target, cred, enforcer):
return False
return True
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the AndCheck object for convenience.
"""
self.rules.append(rule)
return self
class OrCheck(BaseCheck):
"""Implements the "or" operator.
A policy check that requires that at least one of a list of other
checks returns True.
"""
def __init__(self, rules):
"""Initialize the 'or' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' or '.join(str(r) for r in self.rules)
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that at least one rule accept in order to return True.
"""
for rule in self.rules:
if rule(target, cred, enforcer):
return True
return False
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the OrCheck object for convenience.
"""
self.rules.append(rule)
return self
def _parse_check(rule):
"""Parse a single base check rule into an appropriate Check object."""
# Handle the special checks
if rule == '!':
return FalseCheck()
elif rule == '@':
return TrueCheck()
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_LE("Failed to understand rule %s") % rule)
# If the rule is invalid, we'll fail closed
return FalseCheck()
# Find what implements the check
if kind in _checks:
return _checks[kind](kind, match)
elif None in _checks:
return _checks[None](kind, match)
else:
LOG.error(_LE("No handler for matches of kind %s") % kind)
return FalseCheck()
def _parse_list_rule(rule):
"""Translates the old list-of-lists syntax into a tree of Check objects.
Provided for backwards compatibility.
"""
# Empty rule defaults to True
if not rule:
return TrueCheck()
# Outer list is joined by "or"; inner list by "and"
or_list = []
for inner_rule in rule:
# Elide empty inner lists
if not inner_rule:
continue
# Handle bare strings
if isinstance(inner_rule, six.string_types):
inner_rule = [inner_rule]
# Parse the inner rules into Check objects
and_list = [_parse_check(r) for r in inner_rule]
# Append the appropriate check to the or_list
if len(and_list) == 1:
or_list.append(and_list[0])
else:
or_list.append(AndCheck(and_list))
# If we have only one check, omit the "or"
if not or_list:
return FalseCheck()
elif len(or_list) == 1:
return or_list[0]
return OrCheck(or_list)
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
"""Tokenizer for the policy language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
for tok in _tokenize_re.split(rule):
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'check', _parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
class ParseStateMeta(type):
"""Metaclass for the ParseState class.
Facilitates identifying reduction methods.
"""
def __new__(mcs, name, bases, cls_dict):
"""Create the class.
Injects the 'reducers' list, a list of tuples matching token sequences
to the names of the corresponding reduction methods.
"""
reducers = []
for key, value in cls_dict.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
cls_dict['reducers'] = reducers
return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
def reducer(*tokens):
"""Decorator for reduction methods.
Arguments are a sequence of tokens, in order, which should trigger running
this reduction method.
"""
def decorator(func):
# Make sure we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the tokens to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return decorator
@six.add_metaclass(ParseStateMeta)
class ParseState(object):
"""Implement the core of parsing the policy language.
Uses a greedy reduction algorithm to reduce a sequence of tokens into
a single terminal, the value of which will be the root of the Check tree.
Note: error reporting is rather lacking. The best we can get with
this parser formulation is an overall "parse failed" error.
Fortunately, the policy language is simple enough that this
shouldn't be that big a problem.
"""
def __init__(self):
"""Initialize the ParseState."""
self.tokens = []
self.values = []
def reduce(self):
"""Perform a greedy reduction of the token stream.
If a reducer method matches, it will be executed, then the
reduce() method will be called recursively to search for any more
possible reductions.
"""
for reduction, methname in self.reducers:
if (len(self.tokens) >= len(reduction) and
self.tokens[-len(reduction):] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-len(reduction):])
# Update the tokens and values
self.tokens[-len(reduction):] = [r[0] for r in results]
self.values[-len(reduction):] = [r[1] for r in results]
# Check for any more reductions
return self.reduce()
def shift(self, tok, value):
"""Adds one more token to the state. Calls reduce()."""
self.tokens.append(tok)
self.values.append(value)
# Do a greedy reduce...
self.reduce()
@property
def result(self):
"""Obtain the final result of the parse.
Raises ValueError if the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError("Could not parse rule")
return self.values[0]
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expressions into a 'check' token."""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""Create an 'and_expr'.
Join two checks by the 'and' operator.
"""
return [('and_expr', AndCheck([check1, check2]))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""Extend an 'and_expr' by adding one more check."""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""Create an 'or_expr'.
Join two checks by the 'or' operator.
"""
return [('or_expr', OrCheck([check1, check2]))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""Extend an 'or_expr' by adding one more check."""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of another check."""
return [('check', NotCheck(check))]
def _parse_text_rule(rule):
"""Parses policy to the tree.
Translates a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return TrueCheck()
# Parse the token stream
state = ParseState()
for tok, value in _parse_tokenize(rule):
state.shift(tok, value)
try:
return state.result
except ValueError:
# Couldn't parse the rule
LOG.exception(_LE("Failed to understand rule %s") % rule)
# Fail closed
return FalseCheck()
def parse_rule(rule):
"""Parses a policy rule into a tree of Check objects."""
# If the rule is a string, it's in the policy language
if isinstance(rule, six.string_types):
return _parse_text_rule(rule)
return _parse_list_rule(rule)
def register(name, func=None):
"""Register a function or Check class as a policy check.
:param name: Gives the name of the check type, e.g., 'rule',
'role', etc. If name is None, a default check type
will be registered.
:param func: If given, provides the function or class to register.
If not given, returns a function taking one argument
to specify the function or class to register,
allowing use as a decorator.
"""
# Perform the actual decoration by registering the function or
# class. Returns the function or class for compliance with the
# decorator interface.
def decorator(func):
_checks[name] = func
return func
# If the function or class is given, do the registration
if func:
return decorator(func)
return decorator
@register("rule")
class RuleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Recursively checks credentials based on the defined rules."""
try:
return enforcer.rules[self.match](target, creds, enforcer)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register("role")
class RoleCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check that there is a matching role in the cred dict."""
return self.match.lower() in [x.lower() for x in creds['roles']]
@register('http')
class HttpCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check http: rules by calling to a remote server.
This example implementation simply verifies that the response
is exactly 'True'.
"""
url = ('http:' + self.match) % target
data = {'target': jsonutils.dumps(target),
'credentials': jsonutils.dumps(creds)}
post_data = urlparse.urlencode(data)
f = urlrequest.urlopen(url, post_data)
return f.read() == "True"
@register(None)
class GenericCheck(Check):
def __call__(self, target, creds, enforcer):
"""Check an individual match.
Matches look like:
tenant:%(tenant_id)s
role:compute:admin
True:%(user.enabled)s
'Member':%(role.name)s
"""
try:
match = self.match % target
except KeyError:
# While doing GenericCheck if key not
# present in Target return false
return False
try:
# Try to interpret self.kind as a literal
leftval = ast.literal_eval(self.kind)
except ValueError:
try:
kind_parts = self.kind.split('.')
leftval = creds
for kind_part in kind_parts:
leftval = leftval[kind_part]
except KeyError:
return False
return match == six.text_type(leftval)
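# --- Illustrative usage sketch (added example; not part of the original
# module). It exercises the policy language described in the module
# docstring against in-memory rules, so no policy.json file is read; the
# rule names and credential values below are made up.
def _example_policy_check():
    rules = Rules.load_json(jsonutils.dumps({
        "admin_required": "role:admin",
        "owner": "project_id:%(project_id)s",
        "admin_or_owner": "rule:admin_required or rule:owner",
    }))
    enforcer = Enforcer(rules=rules, use_conf=False)
    target = {"project_id": "p-1"}
    creds = {"roles": ["member"], "project_id": "p-1"}
    # 'role:admin' fails for these credentials, but the 'owner' branch
    # matches the project_id in the target, so the "or" rule returns True.
    return enforcer.enforce("admin_or_owner", target, creds)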
| {
"content_hash": "712c88798016b1864ec6bd6f988a2983",
"timestamp": "",
"source": "github",
"line_count": 909,
"max_line_length": 78,
"avg_line_length": 30.88998899889989,
"alnum_prop": 0.5958189394209196,
"repo_name": "mrunge/openstack_horizon",
"id": "b4185e65360da062c4d7d53fda82562bf93c39c7",
"size": "28720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_horizon/openstack/common/policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "63809"
},
{
"name": "JavaScript",
"bytes": "40"
},
{
"name": "Python",
"bytes": "3460539"
},
{
"name": "Shell",
"bytes": "16000"
}
],
"symlink_target": ""
} |
"""Forms for the ``enquiry`` app."""
| {
"content_hash": "1c80a49ab98083d6a9803da5e9b18f22",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 36,
"avg_line_length": 37,
"alnum_prop": 0.5675675675675675,
"repo_name": "bitmazk/django-enquiry",
"id": "e83efd218b5c98f9d1184a3a137f41475da49c82",
"size": "37",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enquiry/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74079"
}
],
"symlink_target": ""
} |
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises
from mne import io, read_events, read_cov, read_source_spaces
from mne import SourceEstimate
from mne.datasets import sample
from mne.viz import plot_cov, plot_bem, plot_events
from mne.viz import plot_source_spectrogram
warnings.simplefilter('always') # enable b/c these tests throw warnings
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
import matplotlib.pyplot as plt
data_dir = sample.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
def _get_raw():
return io.Raw(raw_fname, preload=True)
def _get_events():
return read_events(event_name)
def test_plot_cov():
"""Test plotting of covariances
"""
raw = _get_raw()
cov = read_cov(cov_fname)
fig1, fig2 = plot_cov(cov, raw.info, proj=True, exclude=raw.ch_names[6:])
plt.close('all')
@sample.requires_sample_data
def test_plot_bem():
"""Test plotting of BEM contours
"""
assert_raises(IOError, plot_bem, subject='bad-subject',
subjects_dir=subjects_dir)
assert_raises(ValueError, plot_bem, subject='sample',
subjects_dir=subjects_dir, orientation='bad-ori')
plot_bem(subject='sample', subjects_dir=subjects_dir,
orientation='sagittal', slices=[50, 100])
def test_plot_events():
"""Test plotting events
"""
event_labels = {'aud_l': 1, 'aud_r': 2, 'vis_l': 3, 'vis_r': 4}
color = {1: 'green', 2: 'yellow', 3: 'red', 4: 'c'}
raw = _get_raw()
events = _get_events()
plot_events(events, raw.info['sfreq'], raw.first_samp)
plot_events(events, raw.info['sfreq'], raw.first_samp, equal_spacing=False)
# Test plotting events without sfreq
plot_events(events, first_samp=raw.first_samp)
warnings.simplefilter('always', UserWarning)
with warnings.catch_warnings(record=True):
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=event_labels)
plot_events(events, raw.info['sfreq'], raw.first_samp,
color=color)
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=event_labels, color=color)
assert_raises(ValueError, plot_events, events, raw.info['sfreq'],
raw.first_samp, event_id={'aud_l': 1}, color=color)
assert_raises(ValueError, plot_events, events, raw.info['sfreq'],
raw.first_samp, event_id={'aud_l': 111}, color=color)
@sample.requires_sample_data
def test_plot_source_spectrogram():
"""Test plotting of source spectrogram
"""
sample_src = read_source_spaces(op.join(data_dir, 'subjects', 'sample',
'bem', 'sample-oct-6-src.fif'))
# dense version
vertices = [s['vertno'] for s in sample_src]
n_times = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.ones((n_verts, n_times))
stc = SourceEstimate(stc_data, vertices, 1, 1)
plot_source_spectrogram([stc, stc], [[1, 2], [3, 4]])
assert_raises(ValueError, plot_source_spectrogram, [], [])
assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
[[1, 2], [3, 4]], tmin=0)
assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
[[1, 2], [3, 4]], tmax=7)
| {
"content_hash": "b564e026d77c38ca878e2743b53e0b2c",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 79,
"avg_line_length": 35.333333333333336,
"alnum_prop": 0.6283018867924528,
"repo_name": "jaeilepp/eggie",
"id": "8b76ccf8e369120b26eb9d331245220f105240ee",
"size": "4058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/viz/tests/test_misc.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "3357472"
}
],
"symlink_target": ""
} |
from snp import SNPService, SNProtocolClientFactory, Script, CertManager
from Config import Config
from twisted.internet import reactor, ssl
from TelegramService import TelegramService
class MainService(SNPService):
def __init__(self):
SNPService.__init__(self)
self.config = Config.getconf()
self.name = self.config.name
self.port = int(self.config.server.Port)
self.ip = self.config.server.IP
self.tg = TelegramService(self.config.token)
self.cert_manager = CertManager("keys", "skynet", self.name)
def type_wel(self, request, reqid, protocol):
protocol.sendResponse({"Type": "WEL", "Name": self.name, "Methods": []}, reqid)
def type_cmt(self, request, reqid, protocol):
pass
def startService(self):
from snp import create_self_signed_cert
create_self_signed_cert("keys", self.config.name)
fact = SNProtocolClientFactory(self)
self.cert_manager.connect_to_server(self.ip, self.port, fact)
self.tg.parent = self
self.tg.startService()
def connectionMade(self, protocol):
ip = protocol.transport.getPeer().host
self.peers[ip] = protocol
def get_devices(self):
def callb(res):
ret = {}
devices = res["Devices"]
for device in devices:
ret[int(device["ID"])] = device["Name"]
return ret
d = list(self.peers.values())[0].sendRequest({"Type": "GDL"})
d.addCallback(callb)
return d
def get_device_info(self, id):
def callb(res):
return res["Device"]
d = list(self.peers.values())[0].sendRequest({"Type": "GDF", "DevId": id})
d.addCallback(callb)
return d
def get_scripts(self, password):
def callb(res):
            scripts = {int(script["Id"]): Script.create_from_dict(script) for script in res["Scripts"]}
return scripts
d = list(self.peers.values())[0].sendRequest({"Type": "GSC", "Password": password})
d.addCallback(callb)
return d
def remove_script(self, id, password):
d = list(self.peers.values())[0].sendRequest({"Type": "DSC", "ScriptId": id, "Password": password})
return d
def update_field(self, dev_id, field, value):
d = list(self.peers.values())[0].sendRequest({"Type": "UDF",
"DevId": dev_id, "Field": field, "Value": value})
return d
def create_script(self, script, password):
d = list(self.peers.values())[0].sendRequest({"Type": "CSC", "Script": script.to_dict(), "Password": password})
return d
def get_servers(self, password):
def callb(res):
return res["Servers"]
d = list(self.peers.values())[0].sendRequest({"Type": "GSD", "Password": password})
d.addCallback(callb)
return d
def add_server(self, ip, port, pin, password):
def callb(res):
return res["Server"]
d = list(self.peers.values())[0].sendRequest({"Type": "RSD", "Password": password,
"IP": ip, "Port": port, "Pin": pin})
d.addCallback(callb)
return d
| {
"content_hash": "5234f6f025ea8381a997485de2bb734b",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 119,
"avg_line_length": 37.02272727272727,
"alnum_prop": 0.5776550030693677,
"repo_name": "tsnik/SkyNet",
"id": "94889d3813c9c1e3417977e0093e51108d9f681d",
"size": "3258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SkyNetControlTelegramServer/MainService.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "1752"
},
{
"name": "Python",
"bytes": "81163"
}
],
"symlink_target": ""
} |
try:
from setuptools import setup, find_packages
except ImportError:
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
version = "0.5.0"
setup(
name="hflossk",
version=version,
description="HFOSS course materials via flask",
classifiers=[
"Intended Audience :: Education",
"Topic :: Education :: Computer Aided Instruction (CAI)",
],
keywords="",
author="Remy DeCausemaker",
author_email="[email protected]",
url="http://fossrit.github.io/hflossk",
license="GPLv3+",
packages=find_packages(
),
include_package_data=True,
zip_safe=False,
install_requires=[
"Flask",
"mako",
"flask-mako",
"feedparser",
"pyyaml",
"frozen-flask",
"tornado"
],
tests_require=[
'tox',
'nose',
'validator.py',
'pep8',
],
#TODO: Deal with entry_points
#entry_points="""
#[console_scripts]
#pythong = pythong.util:parse_args
#"""
)
| {
"content_hash": "2708f20dd880b3871803999f36bdb909",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 65,
"avg_line_length": 22.354166666666668,
"alnum_prop": 0.581547064305685,
"repo_name": "decause/hflossk",
"id": "f0bb404c783a7f1763c6c7a934cd9442fc1b57e5",
"size": "1115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4183"
},
{
"name": "JavaScript",
"bytes": "291"
},
{
"name": "Makefile",
"bytes": "117790"
},
{
"name": "Python",
"bytes": "39833"
}
],
"symlink_target": ""
} |
import urllib2
import gzip
import re
import sys
from StringIO import StringIO
from bs4 import BeautifulSoup
__author__ = 'cheyulin'
def loadData(url):
request = urllib2.Request(url)
request.add_header('Accept-encoding', 'gzip')
response = urllib2.urlopen(request)
print response.info().get('Content-Encoding')
if response.info().get('Content-Encoding') == 'gzip':
print 'response data is in gzip format.'
buf = StringIO(response.read())
f = gzip.GzipFile(fileobj=buf)
data = f.read()
else:
data = response.read()
return data
if __name__ == '__main__':
reload(sys)
    i = 0
sys.setdefaultencoding('utf-8')
path = r'/home/cheyulin/Documents/Paper/GraphInDB/vldb2013/'
page = loadData('http://www.vldb.org/pvldb/vol6.html')
# path = r'/home/cheyulin/Documents/Paper/GraphInDB/vldb2014/'
# page = loadData('http://www.vldb.org/pvldb/vol7.html')
# path = r'/home/cheyulin/Documents/Paper/GraphInDB/vldb2016/'
# page = loadData('http://www.vldb.org/pvldb/vol9.html')
soup = BeautifulSoup(page, from_encoding='utf-8')
    paper_info = soup.find_all('a')
for paper in paper_info:
if re.match(r'.*http.*pdf.*', str(paper)):
paper_file_name = str(paper.string).strip().replace(' ', '_') + '.pdf'
if re.match('.*[Gg]raph.*', paper_file_name):
i += 1
paper_url = paper['href']
print "hi:" + str(paper).split('</a>')[0].split(' ')[2].strip()
print paper_file_name
print str(paper_url).strip()
f = urllib2.urlopen(paper_url)
with open(path + paper_file_name, 'wb') as output_stream:
output_stream.write(f.read())
print i
| {
"content_hash": "b67189c91b6fc1325a723dcc26f5a0ac",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 82,
"avg_line_length": 35.82,
"alnum_prop": 0.5896147403685092,
"repo_name": "YcheLanguageStudio/PythonStudy",
"id": "af5c49dd9107b197cbc087d8323c96cc2f598371",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "study/third_party_library/crawler/FindInterestedPaperVLDB2014.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "92486"
}
],
"symlink_target": ""
} |
import os
__version__ = open(os.path.join(os.path.dirname(__file__),'VERSION')).read().strip()
from fields import JSONField
| {
"content_hash": "c5cd1d0d050dd95ce3d9c0ae683ab1f2",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 84,
"avg_line_length": 31.25,
"alnum_prop": 0.696,
"repo_name": "danigosa/django-jsonfield",
"id": "c88faebc19ecdaa010aba883a4eb10fac096139a",
"size": "125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsonfield/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14929"
}
],
"symlink_target": ""
} |
"""Script for running all gpkit unit tests"""
from gpkit.tests.helpers import run_tests
def import_tests():
"""Get a list of all GPkit unit test TestCases"""
tests = []
from gpkit.tests import t_tools
tests += t_tools.TESTS
from gpkit.tests import t_sub
tests += t_sub.TESTS
from gpkit.tests import t_vars
tests += t_vars.TESTS
from gpkit.tests import t_nomials
tests += t_nomials.TESTS
from gpkit.tests import t_constraints
tests += t_constraints.TESTS
from gpkit.tests import t_nomial_array
tests += t_nomial_array.TESTS
from gpkit.tests import t_model
tests += t_model.TESTS
from gpkit.tests import t_solution_array
tests += t_solution_array.TESTS
from gpkit.tests import t_small
tests += t_small.TESTS
from gpkit.tests import t_examples
tests += t_examples.TESTS
from gpkit.tests import t_keydict
tests += t_keydict.TESTS
return tests
def run(xmloutput=False, tests=None, verbosity=1):
"""Run all gpkit unit tests.
Arguments
---------
xmloutput: bool
If true, generate xml output files for continuous integration
"""
if tests is None:
tests = import_tests()
if xmloutput:
run_tests(tests, xmloutput='test_reports')
else: # pragma: no cover
run_tests(tests, verbosity=verbosity)
if __name__ == "__main__": # pragma: no cover
run()
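# Illustrative usage (a sketch, not part of the original module): run a couple
# of suites with a more verbose runner.
#
#     from gpkit.tests.run_tests import import_tests, run
#     run(tests=import_tests()[:2], verbosity=2)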
| {
"content_hash": "90369497df3487f48d5cb15f10c61874",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 69,
"avg_line_length": 23.311475409836067,
"alnum_prop": 0.6490857946554149,
"repo_name": "hoburg/gpkit",
"id": "1d5148753124069a2261ada2656b5901e4b98400",
"size": "1422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpkit/tests/run_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "398629"
},
{
"name": "Shell",
"bytes": "1334"
}
],
"symlink_target": ""
} |
from bson import DBRef, SON  # needed by the isinstance checks below
from base import (BaseDict, BaseList, TopLevelDocumentMetaclass, get_document)
from fields import (ReferenceField, ListField, DictField, MapField)
from connection import get_db
from queryset import QuerySet
from document import Document
class DeReference(object):
def __call__(self, items, max_depth=1, instance=None, name=None):
"""
Cheaply dereferences the items to a set depth.
        Also handles the conversion of complex data types.
:param items: The iterable (dict, list, queryset) to be dereferenced.
:param max_depth: The maximum depth to recurse to
:param instance: The owning instance used for tracking changes by
:class:`~esengine.base.ComplexBaseField`
:param name: The name of the field, used for tracking changes by
:class:`~esengine.base.ComplexBaseField`
"""
if items is None or isinstance(items, basestring):
return items
# cheapest way to convert a queryset to a list
# list(queryset) uses a count() query to determine length
if isinstance(items, QuerySet):
items = [i for i in items]
self.max_depth = max_depth
doc_type = None
if instance and instance._fields:
doc_type = instance._fields[name].field
if isinstance(doc_type, ReferenceField):
doc_type = doc_type.document_type
if all([i.__class__ == doc_type for i in items]):
return items
self.reference_map = self._find_references(items)
self.object_map = self._fetch_objects(doc_type=doc_type)
return self._attach_objects(items, 0, instance, name)
def _find_references(self, items, depth=0):
"""
Recursively finds all db references to be dereferenced
:param items: The iterable (dict, list, queryset)
:param depth: The current depth of recursion
"""
reference_map = {}
if not items or depth >= self.max_depth:
return reference_map
# Determine the iterator to use
if not hasattr(items, 'items'):
iterator = enumerate(items)
else:
iterator = items.iteritems()
# Recursively find dbreferences
depth += 1
for k, item in iterator:
if hasattr(item, '_fields'):
for field_name, field in item._fields.iteritems():
v = item._data.get(field_name, None)
if isinstance(v, (DBRef)):
reference_map.setdefault(field.document_type, []).append(v.id)
elif isinstance(v, (dict, SON)) and '_ref' in v:
reference_map.setdefault(get_document(v['_cls']), []).append(v['_ref'].id)
elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
field_cls = getattr(getattr(field, 'field', None), 'document_type', None)
references = self._find_references(v, depth)
for key, refs in references.iteritems():
if isinstance(field_cls, (Document, TopLevelDocumentMetaclass)):
key = field_cls
reference_map.setdefault(key, []).extend(refs)
elif isinstance(item, (DBRef)):
reference_map.setdefault(item.collection, []).append(item.id)
elif isinstance(item, (dict, SON)) and '_ref' in item:
reference_map.setdefault(get_document(item['_cls']), []).append(item['_ref'].id)
elif isinstance(item, (dict, list, tuple)) and depth - 1 <= self.max_depth:
references = self._find_references(item, depth - 1)
for key, refs in references.iteritems():
reference_map.setdefault(key, []).extend(refs)
return reference_map
def _fetch_objects(self, doc_type=None):
"""Fetch all references and convert to their document objects
"""
object_map = {}
for col, dbrefs in self.reference_map.iteritems():
keys = object_map.keys()
refs = list(set([dbref for dbref in dbrefs if str(dbref) not in keys]))
if hasattr(col, 'objects'): # We have a document class for the refs
references = col.objects.in_bulk(refs)
for key, doc in references.iteritems():
object_map[key] = doc
else: # Generic reference: use the refs data to convert to document
if doc_type and not isinstance(doc_type, (ListField, DictField, MapField,) ):
references = doc_type._get_db()[col].find({'_id': {'$in': refs}})
for ref in references:
doc = doc_type._from_son(ref)
object_map[doc.id] = doc
else:
references = get_db()[col].find({'_id': {'$in': refs}})
for ref in references:
if '_cls' in ref:
doc = get_document(ref["_cls"])._from_son(ref)
else:
doc = doc_type._from_son(ref)
object_map[doc.id] = doc
return object_map
def _attach_objects(self, items, depth=0, instance=None, name=None):
"""
        Recursively attaches the fetched reference objects back onto the items
:param items: The iterable (dict, list, queryset)
:param depth: The current depth of recursion
:param instance: The owning instance used for tracking changes by
:class:`~esengine.base.ComplexBaseField`
:param name: The name of the field, used for tracking changes by
:class:`~esengine.base.ComplexBaseField`
"""
if not items:
if isinstance(items, (BaseDict, BaseList)):
return items
if instance:
if isinstance(items, dict):
return BaseDict(items, instance, name)
else:
return BaseList(items, instance, name)
if isinstance(items, (dict, SON)):
if '_ref' in items:
return self.object_map.get(items['_ref'].id, items)
elif '_types' in items and '_cls' in items:
doc = get_document(items['_cls'])._from_son(items)
doc._data = self._attach_objects(doc._data, depth, doc, name)
return doc
if not hasattr(items, 'items'):
is_list = True
iterator = enumerate(items)
data = []
else:
is_list = False
iterator = items.iteritems()
data = {}
depth += 1
for k, v in iterator:
if is_list:
data.append(v)
else:
data[k] = v
if k in self.object_map:
data[k] = self.object_map[k]
elif hasattr(v, '_fields'):
for field_name, field in v._fields.iteritems():
v = data[k]._data.get(field_name, None)
if isinstance(v, (DBRef)):
data[k]._data[field_name] = self.object_map.get(v.id, v)
elif isinstance(v, (dict, SON)) and '_ref' in v:
data[k]._data[field_name] = self.object_map.get(v['_ref'].id, v)
elif isinstance(v, dict) and depth <= self.max_depth:
data[k]._data[field_name] = self._attach_objects(v, depth, instance=instance, name=name)
elif isinstance(v, (list, tuple)) and depth <= self.max_depth:
data[k]._data[field_name] = self._attach_objects(v, depth, instance=instance, name=name)
elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
data[k] = self._attach_objects(v, depth - 1, instance=instance, name=name)
elif hasattr(v, 'id'):
data[k] = self.object_map.get(v.id, v)
if instance and name:
if is_list:
return BaseList(data, instance, name)
return BaseDict(data, instance, name)
depth += 1
return data
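# Illustrative usage (a hedged sketch, not part of the original module): given a
# queryset whose documents hold references, a one-level dereference could look
# like this; the `Post` document class and its fields are assumptions made only
# for the example.
#
#     posts = Post.objects()
#     posts = DeReference()(posts, max_depth=1)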
| {
"content_hash": "060ab07d7e08129bc714c5abf3b01d32",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 112,
"avg_line_length": 44.81621621621622,
"alnum_prop": 0.539741888795079,
"repo_name": "mouadino/pyes",
"id": "d83da7c32f5e80e036fef6e24fd1525647961d12",
"size": "8338",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyes/engine/dereference.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "460454"
},
{
"name": "Shell",
"bytes": "557"
}
],
"symlink_target": ""
} |
print('Finding Half Light Radii and Corresponding Slopes')
import astropy.table as table
import numpy as np
import matplotlib.pyplot as plt
from defcuts import *
from def_get_mags import *
from def_lump_prof import *
from my_def_plots import *
from defflags import *
indir='/Users/amandanewmark/repositories/galaxy_dark_matter/GAH/'
datatab = table.Table.read(indir+ 'LOWZ_HSCGAMA15_apmgs+cmodmag.fits')
bands=['g', 'r', 'i','z', 'y']
parm=['flags_pixel_saturated_center','flags_pixel_edge','flags_pixel_interpolated_center','flags_pixel_cr_center','flags_pixel_suspect_center', 'flags_pixel_clipped_any','flags_pixel_bad']
Flags=['flags_pixel_bright_object_center', 'brobj_cen_flag-', 'No Bright Object Centers', 'Only Bright Object Centers', 'brobj_cen_flag']
daperture=[1.01,1.51,2.02,3.02,4.03,5.71,8.40,11.8,16.8,23.5]
aperture=[x*0.5 for x in daperture]
#get rid of cuts
#mag_cmodel
ne=[99.99, 199.99, 0.0]
mincut=0.1
maxcut=''
cutdata=not_cut(datatab, bands, 'mag_forced_cmodel', ne)
#get rid of flagged galaxies
for b in range(0, len(bands)-1):
newdata=many_flags(cutdata, parm, bands[b]) #flags not in y?
cutdata=newdata
bandi=['i']
outdir='/Users/amandanewmark/repositories/galaxy_dark_matter/lumprofplots/single_plot/'
Flagdat, Notdat,lab= TFflag(bandi,Flags, newdata)
halfrad, rad, lumdens=get_halflight(Flagdat, bands, aperture, scale='log')
objid=Flagdat['object_id_1']
m1=halflight_slopes(halfrad, rad, lumdens, objid, plots='yes', outdir=outdir)
#*****now for Not:
halfradN, radN, lumdensN=get_halflight(Notdat, bands, aperture, scale='log')
m2=halflight_slopes(halfradN, radN, lumdensN, objid, plots='yes', outdir=outdir)
figs=plt.figure()
bs=np.linspace(-2.0,-1.4,num=15, endpoint=False)
plt.hist(m1, bs, color='red', label='Not Flagged Galaxies', alpha=0.8)
plt.hist(m2,bs, color='blue', label='Flagged Galaxies', alpha=0.8)
plt.xlabel('Slopes', fontsize=10)
plt.legend(loc=0,prop={'size':7.0})
plt.ylabel('Frequency', fontsize=10)
plt.show()
| {
"content_hash": "9d05dff9674a5d2581e86235da574881",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 188,
"avg_line_length": 31.03125,
"alnum_prop": 0.7275931520644512,
"repo_name": "anewmark/galaxy_dark_matter",
"id": "c50ab19b7edab394ff2fd0a0e5d3834cbe89d1c9",
"size": "1986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "not currently in use/call_halflight.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "74060"
},
{
"name": "PostScript",
"bytes": "1164"
},
{
"name": "Python",
"bytes": "240903"
},
{
"name": "TeX",
"bytes": "1742317"
}
],
"symlink_target": ""
} |
from pyswagger import App, primitives, errs, io
from ..utils import get_test_data_folder
from pyswagger.spec.v2_0 import objects, parser
from pyswagger.spec import base
from pyswagger.utils import jp_compose
from pyswagger.primitives import Primitive
import os
import unittest
import datetime
import six
class SchemaTestCase(unittest.TestCase):
""" test for Schema object """
@classmethod
def setUpClass(kls):
kls.app = App._create_(get_test_data_folder(version='2.0', which=os.path.join('schema', 'model')))
def test_model_tag(self):
""" test basic model """
t = self.app.resolve('#/definitions/Tag')
self.assertTrue(isinstance(t, objects.Schema))
v = t._prim_(dict(id=1, name='Hairy'), self.app.prim_factory)
self.assertTrue(isinstance(v, primitives.Model))
self.assertEqual(v.id, 1)
self.assertEqual(v.name, 'Hairy')
def test_model_pet(self):
""" test complex model, including
model inheritance
"""
p = self.app.resolve('#/definitions/Pet')
self.assertTrue(isinstance(p, objects.Schema))
v = p._prim_(dict(
name='Buf',
photoUrls=['http://flickr.com', 'http://www.google.com'],
id=10,
category=dict(
id=1,
name='dog'
),
tags=[
dict(id=1, name='Hairy'),
dict(id=2, name='south'),
]
), self.app.prim_factory)
self.assertTrue(isinstance(v, primitives.Model))
self.assertEqual(v.name, 'Buf')
self.assertEqual(v.photoUrls[0], 'http://flickr.com')
self.assertEqual(v.photoUrls[1], 'http://www.google.com')
self.assertEqual(v.id, 10)
self.assertTrue(isinstance(v.tags[0], primitives.Model))
self.assertTrue(v.tags[0].id, 1)
self.assertTrue(v.tags[0].name, 'Hairy')
self.assertTrue(isinstance(v.category, primitives.Model))
self.assertTrue(v.category.id, 1)
self.assertTrue(v.category.name, 'dog')
def test_model_employee(self):
""" test model with allOf only
"""
e = self.app.resolve("#/definitions/Employee")
self.assertTrue(isinstance(e, objects.Schema))
v = e._prim_(dict(
id=1,
skill_id=2,
location="home",
skill_name="coding",
email="[email protected]"
), self.app.prim_factory)
self.assertTrue(isinstance(v, primitives.Model))
self.assertEqual(v.id, 1)
self.assertEqual(v.skill_id, 2)
self.assertEqual(v.location, "home")
self.assertEqual(v.skill_name, "coding")
self.assertRaises(
errs.ValidationError,
e._prim_, dict(
id=1,
skill_id=2,
location="home",
skill_name="coding",
email="[email protected]"
), self.app.prim_factory
)
def test_model_boss(self):
""" test model with allOf and properties
"""
b = self.app.resolve("#/definitions/Boss")
self.assertTrue(isinstance(b, objects.Schema))
v = b._prim_(dict(
id=1,
location="office",
boss_name="not you"
), self.app.prim_factory)
self.assertTrue(isinstance(v, primitives.Model))
self.assertEqual(v.id, 1)
self.assertEqual(v.location, "office")
self.assertEqual(v.boss_name, "not you")
def test_int(self):
""" test integer,
schema is separated into parts
"""
i = self.app.resolve("#/definitions/int")
self.assertRaises(errs.ValidationError, i._prim_, 200, self.app.prim_factory)
self.assertRaises(errs.ValidationError, i._prim_, 99, self.app.prim_factory)
num_i32 = self.app.resolve('#/definitions/number_int32')
self.assertRaises(errs.ValidationError, num_i32._prim_, 200, self.app.prim_factory)
self.assertRaises(errs.ValidationError, num_i32._prim_, 99, self.app.prim_factory)
num_i64 = self.app.resolve('#/definitions/number_int64')
self.assertRaises(errs.ValidationError, num_i64._prim_, 200, self.app.prim_factory)
self.assertRaises(errs.ValidationError, num_i64._prim_, 99, self.app.prim_factory)
default_int = self.app.resolve('#/definitions/default_int')
self.assertRaises(errs.ValidationError, default_int._prim_, 200, self.app.prim_factory)
self.assertRaises(errs.ValidationError, default_int._prim_, 99, self.app.prim_factory)
def test_array_of_int(self):
""" test array of integer """
i = self.app.resolve('#/definitions/array_int')
# pass
i._prim_([1, 1, 1, 1, 1], self.app.prim_factory)
i._prim_([1, 1], self.app.prim_factory)
# failed
self.assertRaises(errs.ValidationError, i._prim_, [1, 1, 1, 1, 1, 1], self.app.prim_factory)
self.assertRaises(errs.ValidationError, i._prim_, [1], self.app.prim_factory)
def test_num_multiple_of(self):
""" test multipleOf """
i = self.app.resolve("#/definitions/num_multipleOf")
self.assertRaises(errs.ValidationError, i._prim_, 4, self.app.prim_factory)
i._prim_(5, self.app.prim_factory) # should raise nothing
default_number = self.app.resolve('#/definitions/default_number')
default_number._prim_(5, self.app.prim_factory) # should raise nothing
def test_str_enum(self):
""" test str enum """
e = self.app.resolve("#/definitions/str_enum")
self.assertRaises(errs.ValidationError, e._prim_, "yellow", self.app.prim_factory)
e._prim_("green", self.app.prim_factory) # should raise nothing
def test_byte(self):
""" test byte """
b = self.app.resolve("#/definitions/byte")
bv = b._prim_("BBBBB", self.app.prim_factory)
self.assertEqual(str(bv), "BBBBB", self.app.prim_factory)
self.assertEqual(bv.to_json(), "QkJCQkI=")
def test_binary(self):
""" test binary """
b = self.app.resolve("#/definitions/binary")
bv = b._prim_("BBBBB", self.app.prim_factory)
self.assertEqual(str(bv), "BBBBB", self.app.prim_factory)
self.assertEqual(bv.to_json(), "QkJCQkI=")
def test_date(self):
""" test date """
d = self.app.resolve("#/definitions/date")
# test input of constructor
self.assertEqual(str(d._prim_(float(0), self.app.prim_factory)), "1970-01-01")
self.assertEqual(str(d._prim_(datetime.date.fromtimestamp(0), self.app.prim_factory)), "1970-01-01")
self.assertEqual(str(d._prim_(datetime.date.fromtimestamp(0).isoformat(), self.app.prim_factory)), "1970-01-01")
# to_json
dv = d._prim_(float(0), self.app.prim_factory)
self.assertEqual(dv.to_json(), "1970-01-01")
def test_date_time(self):
""" test date-time """
d = self.app.resolve("#/definitions/date-time")
# test input of constructor
self.assertEqual(str(d._prim_(float(0), self.app.prim_factory)), "1970-01-01T00:00:00")
self.assertEqual(str(d._prim_(datetime.datetime.utcfromtimestamp(0), self.app.prim_factory)), "1970-01-01T00:00:00")
self.assertEqual(str(d._prim_(datetime.datetime.utcfromtimestamp(0).isoformat(), self.app.prim_factory)), "1970-01-01T00:00:00")
# to_json
dv = d._prim_(float(0), self.app.prim_factory)
self.assertEqual(dv.to_json(), "1970-01-01T00:00:00")
def test_model_bool(self):
""" test a model containing boolean """
d = self.app.resolve("#/definitions/model_bool")
dv = d._prim_(dict(bool_val=True), self.app.prim_factory)
# try to access it
self.assertEqual(dv.bool_val, True)
def test_email(self):
""" test string in email format """
d = self.app.resolve('#/definitions/email')
dv = d._prim_('[email protected]', self.app.prim_factory)
self.assertEqual(dv, '[email protected]')
self.assertRaises(errs.ValidationError, d._prim_, '[email protected]', self.app.prim_factory)
def test_uuid(self):
""" test string in uuid format """
d = self.app.resolve('#/definitions/uuid')
# string
dv = d._prim_('12345678-1234-5678-1234-567812345678', self.app.prim_factory)
self.assertTrue(isinstance(dv, primitives.UUID), 'should be an primitives.UUID, not {0}'.format(str(type(dv))))
self.assertEqual(str(dv), '12345678-1234-5678-1234-567812345678')
# byte
dv = d._prim_(six.b('\x78\x56\x34\x12\x34\x12\x78\x56\x12\x34\x56\x78\x12\x34\x56\x78'), self.app.prim_factory)
self.assertTrue(isinstance(dv, primitives.UUID), 'should be an primitives.UUID, not {0}'.format(dv))
self.assertEqual(dv.v.bytes, six.b('\x78\x56\x34\x12\x34\x12\x78\x56\x12\x34\x56\x78\x12\x34\x56\x78'))
# unsupported type - e.g. int
self.assertRaises(ValueError, d._prim_, 123, self.app.prim_factory)
def test_password(self):
""" test string in password """
p = self.app.resolve('#/definitions/password')
pv = p._prim_('p@ssw0rd', self.app.prim_factory)
self.assertTrue(isinstance(pv, six.string_types))
def test_read_only(self):
""" make sure read-only for property works """
op = self.app.s('/k').post
self.assertRaises(Exception, op, p1=dict(protected=1))
resp = io.Response(op)
resp.apply_with(0, '{"protected":1}') # allowed
def test_float_dump(self):
""" failed to dump an object with float property
refer to issue: https://github.com/mission-liao/pyswagger/issues/92
"""
app = App.create(get_test_data_folder(version='2.0', which=os.path.join('schema', 'floatDump')))
app.dump() # should not raise exception
def test_unique_item_on_array(self):
""" uniqueItem == True on array of array
"""
# no duplication, should work
d = self.app.resolve('#/definitions/unique_array')
arr_1 = [
['a', 'b', 'c'],
['d', 'e', 'f']
]
o = d._prim_(arr_1, self.app.prim_factory)
self.assertEqual(len(arr_1), len(o))
# duplicated, should remove duplication
arr_2 = [
['a', 'b', 'c'],
['d', 'e', 'f'],
['a', 'b', 'c'],
]
o = d._prim_(arr_2, self.app.prim_factory)
self.assertEqual(len(o), 2)
def test_unique_item_on_object(self):
""" uniqueItem == True on array of object
"""
d = self.app.resolve('#/definitions/unique_object')
obj_1 = {'prop_1': '1-1', 'prop_2': {'prop_2_1': '1-2-1'}}
obj_1_2 = {'prop_1': '1-1', 'prop_2': {'prop_2_1': '1-2-1-2'}}
obj_2 = {'prop_1': '2-1', 'prop_2': {'prop_2_1': '2-2-1'}}
        # no duplication, should work
arr_1 = [obj_1, obj_1_2, obj_2]
o = d._prim_(arr_1, self.app.prim_factory)
self.assertEqual(len(arr_1), len(o))
# duplicated, remove duplication
arr_2 = [obj_1, obj_1_2, obj_2, obj_1, obj_1_2, obj_2]
o = d._prim_(arr_2, self.app.prim_factory)
self.assertEqual(len(o), 3)
class HeaderTestCase(unittest.TestCase):
""" test for Header object """
@classmethod
def setUpClass(kls):
kls.app = App._create_(get_test_data_folder(version='2.0', which=os.path.join('schema', 'model')))
def test_simple_array(self):
""" header in array """
p1 = self.app.resolve(jp_compose(['#', 'paths', '/t', 'get', 'parameters', '0']))
self.assertTrue(isinstance(p1, objects.Parameter))
v = p1._prim_([1, 2, 3, 4, 5], self.app.prim_factory)
self.assertTrue(isinstance(v, primitives.Array))
self.assertEqual(str(v), '1,2,3,4,5')
def test_integer_limit(self):
""" header in integer """
p2 = self.app.resolve(jp_compose(['#', 'paths', '/t', 'get', 'parameters', '1']))
self.assertTrue(isinstance(p2, objects.Parameter))
self.assertRaises(errs.ValidationError, p2._prim_, 101, self.app.prim_factory)
self.assertRaises(errs.ValidationError, p2._prim_, -1, self.app.prim_factory)
def test_multi_level_array(self):
""" header in array of array """
p3 = self.app.resolve(jp_compose(['#', 'paths', '/t', 'get', 'parameters', '2']))
self.assertTrue(isinstance(p3, objects.Parameter))
self.assertEqual(str(p3._prim_(
[
[
[1,2],
[3,4],
[5,6]
],
[
[7,8],
[9,10]
],
[
[11,12],
[13,14]
]
], self.app.prim_factory)), '1|2,3|4,5|6 7|8,9|10 11|12,13|14')
def test_header_in_response(self):
""" header in response """
resp = io.Response(self.app.s('/t').get)
resp.apply_with(status=200, raw=None, header=dict(
test='1|2,3|4,5|6 7|8,9|10 11|12,13|14'
))
        # note that the test case here is the same as the one above,
        # the difference is that an additional array is wrapped around the header value
self.assertEqual(resp.header['test'], [[
[
[1,2],
[3,4],
[5,6]
],
[
[7,8],
[9,10]
],
[
[11,12],
[13,14]
]
]])
class AdditionalPropertiesTestCase(unittest.TestCase):
""" test case for additionalProperties """
@classmethod
def setUpClass(kls):
kls.app = App._create_(get_test_data_folder(version='2.0', which=os.path.join('schema', 'additionalProperties')))
def test_merge(self):
""" verify merge along with additionalProperties """
# Schema
addp = self.app.resolve('#/definitions/add_prop')
final = objects.Schema(base.NullContext())
final.merge(addp, parser.SchemaContext)
# True
addp = self.app.resolve('#/definitions/add_prop_bool')
final = objects.Schema(base.NullContext())
final.merge(addp, parser.SchemaContext)
# False
addp = self.app.resolve('#/definitions/add_prop_false')
final = objects.Schema(base.NullContext())
final.merge(addp, parser.SchemaContext)
# nested with allOf
addp = self.app.resolve('#/definitions/add_prop_nested')
final = objects.Schema(base.NullContext())
final.merge(addp, parser.SchemaContext)
def test_with_schema(self):
m = self.app.prim_factory.produce(
self.app.resolve('#/definitions/add_prop'),
dict(
name_of_map='test',
category1=dict(
id=1,
name='cat'
),
category2=dict(
id=2,
name='dog'
),
category3=dict(
id=3,
name='fish'
)
))
self.assertTrue(isinstance(m, primitives.Model))
self.assertEqual(m.name_of_map, 'test')
self.assertEqual(m.category1.id, 1)
self.assertEqual(m.category1.name, 'cat')
self.assertEqual(m.category2.id, 2)
self.assertEqual(m.category2.name, 'dog')
self.assertEqual(m.category3.id, 3)
self.assertEqual(m.category3.name, 'fish')
def test_with_bool(self):
d = self.app.resolve('#/definitions/add_prop_bool')
m = self.app.prim_factory.produce(
d,
dict(
name='test_bool',
category1=1,
category2='test_qoo'
)
)
self.assertTrue(isinstance(m, primitives.Model))
self.assertEqual(m.name, 'test_bool')
self.assertEqual(m.category1, 1)
self.assertEqual(m.category2, 'test_qoo')
def test_with_bool_false(self):
d = self.app.resolve('#/definitions/add_prop_false')
m = self.app.prim_factory.produce(
d,
dict(
name='test_bool',
category1=1,
category2='test_qoo'
)
)
self.assertTrue(isinstance(m, primitives.Model))
self.assertEqual(m.name, 'test_bool')
self.assertTrue('category1' not in m)
self.assertTrue('category2' not in m)
def test_with_allof_limitation(self):
""" additionalProperties would accept all keys,
we need to make sure nested model process those keys before
additionalProperties intecept all keys
"""
d = self.app.resolve('#/definitions/add_prop_nested')
self.assertRaises(errs.ValidationError, self.app.prim_factory.produce,
d,
dict(
my_int=99
)
)
def test_array_addp_without_prop(self):
""" special case for array of items with additionalProperties
and without properties
"""
d = self.app.resolve('#/definitions/addp_no_prop')
m = self.app.prim_factory.produce(
d, [
dict(a=1, b=2, c=3),
dict(name='john', email='[email protected]'),
]
)
self.assertEqual(len(m), 2)
self.assertEqual(m[0], dict(a=1, b=2, c=3)) # although we didn't validate it, we should still output it.
self.assertEqual(m[1], dict(name='john', email='[email protected]'))
class ParameterTestCase(unittest.TestCase):
""" test for Parameter object """
@classmethod
def setUpClass(kls):
kls.app = App._create_(get_test_data_folder(version='2.0', which=os.path.join('schema', 'model')))
def test_unknown(self):
p = self.app.resolve('#/paths/~1t/put')
self.assertRaises(ValueError, p, p1='tom', p2='mary', p3='qoo', p4='unknown')
def test_collection_format_default(self):
""" when not defining 'collectFormat', its default should be 'csv'
refer to issue: https://github.com/mission-liao/pyswagger/issues/101
"""
self.app.resolve('#/paths/~1a/get')(p1=['test1', 'test2']) # should not raise exception
class PrimitiveExtensionTestCase(unittest.TestCase):
""" test for extending primitives """
@classmethod
def setUpClass(kls):
factory = Primitive()
def decode_int(obj, val, ctx):
# minus 1
return int(val) - 1
def decode_str(obj, val, ctx):
# remove the last char
return str(val)[:-1]
def str_no_validate(obj, val, ctx):
# same as the one used in pyswagger, but no validation
return str(val)
factory.register('encoded_integer', None, decode_int)
factory.register('string', 'special_encoded', decode_str)
factory.register('string', None, str_no_validate, _2nd_pass=None)
kls.app = App.load(get_test_data_folder(
version='2.0',
which=os.path.join('schema', 'extension'),
), prim=factory)
kls.app.prepare()
def test_extend(self):
""" extend primitives with user defined type/format handler """
m1 = self.app.resolve('#/definitions/m1')
v = m1._prim_({
"_id": 100,
"name": 'Ms',
}, self.app.prim_factory)
self.assertEqual(v._id, 99)
self.assertEqual(v.name, 'M')
def test_overwrite(self):
""" overrite type/format handler used in pyswagger """
m1 = self.app.resolve('#/definitions/m1')
v = m1._prim_({
"job":"man"
}, self.app.prim_factory)
# should not raise
self.assertEqual(v.job, "man")
app = App.create(get_test_data_folder(
version='2.0',
which=os.path.join('schema', 'extension')
))
m1 = app.resolve('#/definitions/m1')
self.assertRaises(errs.ValidationError, m1._prim_, {'job':'man'}, app.prim_factory)
# should raise
| {
"content_hash": "4d49ad01138a6e00eda51f252c4076e9",
"timestamp": "",
"source": "github",
"line_count": 559,
"max_line_length": 136,
"avg_line_length": 36.075134168157426,
"alnum_prop": 0.5638202915798869,
"repo_name": "mission-liao/pyswagger",
"id": "a3932b41e9b9bd00ccba5e64fa4e4d5327f4e032",
"size": "20166",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyswagger/tests/v2_0/test_prim.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "389129"
}
],
"symlink_target": ""
} |
"""
sdep.__main__ is executed when `sdep` is called as a script from the command
line.
"""
from .cli import main
main()
| {
"content_hash": "5467c80fac4ddae53d2af38e59809167",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 76,
"avg_line_length": 17.285714285714285,
"alnum_prop": 0.6776859504132231,
"repo_name": "mattjmcnaughton/sdep",
"id": "89796b7fb6774fdf3a1901bc47227b4f90bf4d48",
"size": "121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdep/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1354"
},
{
"name": "Python",
"bytes": "35941"
}
],
"symlink_target": ""
} |
"""Script to auto-generate API docs.
"""
from __future__ import print_function, division
# stdlib imports
import sys
import re
# local imports
from apigen import ApiDocWriter
# version comparison
from distutils.version import LooseVersion as V
#*****************************************************************************
def abort(error):
print('*WARNING* API documentation not generated: %s' % error)
exit()
def writeapi(package, outdir, source_version=None, other_defines=True):
# Check that the package is available. If not, the API documentation is not
# (re)generated and existing API documentation sources will be used.
try:
__import__(package)
except ImportError:
abort("Can not import " + package)
module = sys.modules[package]
# Check that the source version is equal to the installed
# version. If the versions mismatch the API documentation sources
# are not (re)generated. This avoids automatic generation of documentation
# for older or newer versions if such versions are installed on the system.
installed_version = V(module.__version__)
    if source_version is not None and source_version != installed_version:
abort("Installed version does not match source version")
docwriter = ApiDocWriter(package, rst_extension='.rst',
other_defines=other_defines)
docwriter.package_skip_patterns += [r'\.%s$' % package,
r'.*test.*$',
r'.*duecredit.*$',
r'.*due.*$',
r'\.version.*$']
docwriter.write_api_docs(outdir)
docwriter.write_index(outdir, 'index', relative_to=outdir)
print('%d files written' % len(docwriter.written_modules))
if __name__ == '__main__':
package = sys.argv[1]
outdir = sys.argv[2]
try:
other_defines = sys.argv[3]
except IndexError:
other_defines = True
else:
other_defines = other_defines in ('True', 'true', '1')
writeapi(package, outdir, other_defines=other_defines)
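# Example invocation (illustrative; 'mypackage' and the output path are made up):
#
#     python buildmodref.py mypackage doc/reference/ False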
| {
"content_hash": "75f5ee8ba932ce04b233b7dcb0aa5e75",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 32.46153846153846,
"alnum_prop": 0.595260663507109,
"repo_name": "kpolimis/sklearn-forest-ci",
"id": "5fe41e7b65bf7ffc7fc3903cb9643eaa4710452d",
"size": "2132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/tools/buildmodref.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "11766"
},
{
"name": "Shell",
"bytes": "3145"
}
],
"symlink_target": ""
} |
import time
import RPi.GPIO as GPIO
from calvin.runtime.south.plugins.io.gpio import base_gpiopin
class GPIOPin(base_gpiopin.GPIOPinBase):
"""
Raspberry Pi gpio pin implementation based on the RPi.GPIO package
"""
def __init__(self, trigger, pin, direction, pull):
super(GPIOPin, self).__init__(trigger, pin, direction, pull)
self.trigger = trigger
self.pin = pin
self.has_changed = False
self.value = None
self.pwm = None
GPIO.setmode(GPIO.BCM)
if direction == "i":
if pull is not None:
if pull == "u":
GPIO.setup(pin, GPIO.IN, GPIO.PUD_UP)
elif pull == "d":
GPIO.setup(pin, GPIO.IN, GPIO.PUD_DOWN)
else:
GPIO.setup(pin, GPIO.IN)
elif direction == "o":
GPIO.setup(pin, GPIO.OUT)
def cb_detect_edge(self, channel):
self.has_changed = True
if GPIO.input(self.pin) is GPIO.LOW:
self.value = 0
else:
self.value = 1
self.trigger()
def detect_edge(self, edge):
if edge == "r":
GPIO.add_event_detect(self.pin, GPIO.RISING, callback=self.cb_detect_edge)
elif edge == "f":
GPIO.add_event_detect(self.pin, GPIO.FALLING, callback=self.cb_detect_edge)
elif edge == "b":
GPIO.add_event_detect(self.pin, GPIO.BOTH, callback=self.cb_detect_edge)
def edge_detected(self):
return self.has_changed
def edge_value(self):
self.has_changed = False
return self.value
def set_state(self, state):
if state:
GPIO.output(self.pin, GPIO.HIGH)
else:
GPIO.output(self.pin, GPIO.LOW)
def get_state(self):
if GPIO.input(self.pin) is GPIO.LOW:
return 0
return 1
def pwm_start(self, frequency, dutycycle):
self.pwm = GPIO.PWM(self.pin, frequency)
self.pwm.start(dutycycle)
def pwm_set_frequency(self, frequency):
self.pwm.ChangeFrequency(frequency)
def pwm_set_dutycycle(self, dutycycle):
self.pwm.ChangeDutyCycle(dutycycle)
def pwm_stop(self):
self.pwm.stop()
def shift_out(self, data, repeat):
for x in range(0, repeat):
for bit in data:
GPIO.output(self.pin, bit[0])
time.sleep(bit[1]/1000000.0)
def close(self):
GPIO.cleanup(self.pin)
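# Illustrative usage (a sketch only; the pin number and the no-op trigger are
# assumptions): drive BCM pin 18 as an output, then release it.
#
#     pin = GPIOPin(trigger=lambda: None, pin=18, direction="o", pull=None)
#     pin.set_state(True)
#     pin.close()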
| {
"content_hash": "a1d67657c3f84a109f6e6a8fd834893d",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 87,
"avg_line_length": 29.305882352941175,
"alnum_prop": 0.5636290646326776,
"repo_name": "josrolgil/exjobbCalvin",
"id": "b819a6a8a5fd7f1a1e3ee506232cb20c483fffe3",
"size": "3096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calvin/runtime/south/plugins/io/gpio/platform/raspberry_pi/rpigpio_impl/gpiopin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1727"
},
{
"name": "HTML",
"bytes": "7958"
},
{
"name": "JavaScript",
"bytes": "59355"
},
{
"name": "Python",
"bytes": "1579174"
},
{
"name": "Shell",
"bytes": "12920"
}
],
"symlink_target": ""
} |
import sys
import zmq
import pymongo
import pymongo.json_util
import json
class MongoZMQ(object):
"""
    ZMQ server that adds/fetches documents (i.e. dictionaries) to/from a MongoDB.
NOTE: mongod must be started before using this class
"""
def __init__(self, db_name, table_name, bind_addr="tcp://127.0.0.1:5000"):
"""
bind_addr: address to bind zmq socket on
db_name: name of database to write to (created if doesn't exist)
table_name: name of mongodb 'table' in the db to write to (created if doesn't exist)
"""
self._bind_addr = bind_addr
self._db_name = db_name
self._table_name = table_name
self._conn = pymongo.Connection()
self._db = self._conn[self._db_name]
self._table = self._db[self._table_name]
def _doc_to_json(self, doc):
return json.dumps(doc,default=pymongo.json_util.default)
def add_document(self, doc):
"""
Inserts a document (dictionary) into mongo database table
"""
        print 'adding document %s' % (doc)
try:
self._table.insert(doc)
except Exception,e:
return 'Error: %s' % e
def get_document_by_keys(self, keys):
"""
Attempts to return a single document from database table that matches
each key/value in keys dictionary.
"""
print 'attempting to retrieve document using keys: %s' % keys
try:
return self._table.find_one(keys)
except Exception,e:
return 'Error: %s' % e
def start(self):
context = zmq.Context()
socket = context.socket(zmq.ROUTER)
socket.bind(self._bind_addr)
while True:
msg = socket.recv_multipart()
print "Received msg: ", msg
if len(msg) != 3:
error_msg = 'invalid message received: %s' % msg
print error_msg
reply = [msg[0], error_msg]
socket.send_multipart(reply)
continue
id = msg[0]
operation = msg[1]
contents = json.loads(msg[2])
# always send back the id with ROUTER
reply = [id]
if operation == 'add':
self.add_document(contents)
reply.append("success")
elif operation == 'get':
doc = self.get_document_by_keys(contents)
json_doc = self._doc_to_json(doc)
reply.append(json_doc)
else:
print 'unknown request'
socket.send_multipart(reply)
def main():
MongoZMQ('ipcontroller','jobs').start()
if __name__ == "__main__":
main()
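# Example client (an illustrative sketch; the endpoint and payload are
# assumptions, not part of this module). The server's ROUTER socket expects
# exactly three frames [id, operation, contents], so a DEALER client that
# sends two frames works, with ZMQ prepending the identity frame:
#
#     import json, zmq
#     sock = zmq.Context().socket(zmq.DEALER)
#     sock.connect("tcp://127.0.0.1:5000")
#     sock.send_multipart(['add', json.dumps({'name': 'job-1'})])
#     print sock.recv_multipart()   # expected: ['success']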
| {
"content_hash": "024476da9413de872e4e8cd6e4242441",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 92,
"avg_line_length": 32.30952380952381,
"alnum_prop": 0.547162859248342,
"repo_name": "swn1/pyzmq",
"id": "c1a10fc4655256bb47cdc265aff12f0d18765edd",
"size": "3052",
"binary": false,
"copies": "1",
"ref": "refs/heads/ironpython",
"path": "examples/mongodb/controller.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "22208"
},
{
"name": "C++",
"bytes": "31677"
},
{
"name": "Python",
"bytes": "662126"
}
],
"symlink_target": ""
} |
import datetime
import logging
import requests
from django.utils import timezone
from . import AbstractFeed, AbstractFeedQuery, FeedError, FeedItem
logger = logging.getLogger('wagtailsocialfeed')
class InstagramFeedItem(FeedItem):
"""Implements instagram-specific behaviour"""
@classmethod
def get_post_date(cls, raw):
if 'date' in raw:
timestamp = None
try:
timestamp = float(raw['date'])
except ValueError:
return None
return timezone.make_aware(
datetime.datetime.fromtimestamp(timestamp), timezone=timezone.utc)
return None
@classmethod
def from_raw(cls, raw):
image = {}
caption = None
if 'display_src' in raw:
image = {
'thumb': raw['thumbnail_resources'][1],
'small': raw['thumbnail_resources'][2],
'medium': raw['thumbnail_resources'][3],
'large': raw['thumbnail_resources'][4],
'original_link': "https://www.instagram.com/p/" + raw['code']
}
if 'caption' in raw:
caption = raw['caption']
return cls(
id=raw['id'],
type='instagram',
text=caption,
image_dict=image,
posted=cls.get_post_date(raw),
original_data=raw,
)
class InstagramFeedQuery(AbstractFeedQuery):
def _get_load_kwargs(self, oldest_post):
# Trick from twitter API doc to exclude the oldest post from
# the next result-set
        return {'max_id': oldest_post['id']}
def _search(self, raw_item):
"""Very basic search function"""
return self.query_string.lower() in raw_item
def _load(self, max_id=None):
url = "https://www.instagram.com/{}/?__a=1".format(self.username)
if max_id:
url += "?max_id={}".format(max_id)
resp = requests.get(url)
if resp.status_code == 200:
try:
return resp.json()['user']['media']['nodes']
except ValueError as e:
raise FeedError(e)
except KeyError as e:
raise FeedError("No items could be found in the response")
raise FeedError(resp.reason)
class InstagramFeed(AbstractFeed):
item_cls = InstagramFeedItem
query_cls = InstagramFeedQuery
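# Illustrative sketch (hypothetical data, not taken from Instagram): converting
# a single raw media node into a feed item.
#
#     raw = {'id': '1', 'code': 'abc', 'date': '1469000000', 'caption': 'hi',
#            'display_src': 'http://example.com/p.jpg',
#            'thumbnail_resources': ['t0', 't1', 't2', 't3', 't4']}
#     item = InstagramFeedItem.from_raw(raw)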
| {
"content_hash": "20255dd9cc0caa804821a3e7d75be689",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 82,
"avg_line_length": 29.82716049382716,
"alnum_prop": 0.5629139072847682,
"repo_name": "LUKKIEN/wagtailsocialfeed",
"id": "939b1e99b227fe7af8aaca4251ee58925c3c598a",
"size": "2416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtailsocialfeed/utils/feed/instagram.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "808"
},
{
"name": "HTML",
"bytes": "5680"
},
{
"name": "JavaScript",
"bytes": "836"
},
{
"name": "Makefile",
"bytes": "2468"
},
{
"name": "Python",
"bytes": "71927"
}
],
"symlink_target": ""
} |
import argparse
import os
import simpleaudio as sa
def soundOut(soundName):
files = {
'roar':'chewy_roar.wav',
'battle':'light-sabre-battle.wav',
'sabreon':'light-sabre-on.wav',
'sabroff':'light-sabre-off.wav',
'breathing':'starwar-vader-breathing.wav',
}
path=os.path.dirname(os.path.realpath(__file__))
fullPath= path + '/../../resources/sound-samples/' + files[soundName]
print("fullPath=" + fullPath)
wave_obj = sa.WaveObject.from_wave_file(fullPath)
play_obj = wave_obj.play()
play_obj.wait_done()
if __name__ == '__main__':
## get input from command line.
## python3 playAudio.py -sn sabreon
## or python3 playAudio.py
parser = argparse.ArgumentParser(
description='Voice out sounds given an input argument.')
parser.add_argument(
'-sn',
'--soundName',
help='the name of the sound.',
default='battle',
choices=['battle', 'sabreon', 'sabroff', 'breathing', 'roar'])
args = parser.parse_args()
## call function soundOut
soundOut(args.soundName)
| {
"content_hash": "d018e3c5137fa54119d748684abb741b",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 73,
"avg_line_length": 29.289473684210527,
"alnum_prop": 0.6082659478885895,
"repo_name": "melvinma/funbots",
"id": "f3b38b7c1f976cd4b702554d8e52ee2ae9159afc",
"size": "1113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python-src/examples/playAudio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16409"
},
{
"name": "Shell",
"bytes": "659"
}
],
"symlink_target": ""
} |
from SPARQLWrapper import SPARQLWrapper, JSON
import os
import sys
import array
import re
# This function checks if the given string has a direct resource in dbpedia database
# and returns the resource URL
def getDirectResourceUrl(locationString, database):
#database.setQuery("""SELECT DISTINCT ?Dir WHERE {?Dir rdfs:label \"""" + locationString + """\"@en; a owl:Thing}""")
database.setQuery("""SELECT DISTINCT ?Dir WHERE {?Dir rdfs:label \"""" + locationString + """\"@en; a owl:Thing}""")
database.setReturnFormat(JSON)
results = database.query().convert()
for result in results["results"]["bindings"]:
return result["Dir"]["value"]
# This function checks if a string is a redirection
# and returns original resources URL
def getRedirectedResourceURL(locationString, database):
database.setQuery("""SELECT DISTINCT ?Redir WHERE {dbr:""" + locationString + """ dbo:wikiPageRedirects ?Redir}""")
database.setReturnFormat(JSON)
results = database.query().convert()
for result in results["results"]["bindings"]:
return result["Redir"]["value"]
# Function to query if a given resource has one of the below properties
# signifying that it is a location.
def isLocation(resourceUrl, database):
if resourceUrl is None:
return None
database.setQuery("""
SELECT COUNT(?o) AS ?NoOfResults
WHERE {<""" + resourceUrl + """> a ?o
FILTER (?o = dbo:PopulatedPlace OR
?o = dbo:Place OR
?o = dbo:Location OR
?o = dbo:Settlement OR
?o = dbo:Town OR
?o = dbo:City OR
?o = dbo:AdministrativeRegion OR
?o = yago:Location100027167 OR
?o = umbel-rc:PopulatedPlace)
}
""")
database.setReturnFormat(JSON)
results = database.query().convert()
for result in results["results"]["bindings"]:
return result["NoOfResults"]["value"]
# Function to query in case a location has any disambiguates.
# If it does, the function counts all disambiguates which have the properties below.
# If the returned count is >0, the string is a location
def checkDisambiguates(locationString, database):
database.setQuery("""
select DISTINCT Count(?Disamb) as ?countOfDisambg where
{
dbr:""" + locationString + """ dbo:wikiPageDisambiguates ?Disamb .
?Disamb a ?types .
FILTER (?types = dbo:PopulatedPlace OR
?types = dbo:Place OR
?types = dbo:Location OR
?types = dbo:Settlement OR
?types = dbo:Town OR
?types = dbo:City OR
?types = dbo:AdministrativeRegion OR
?types = yago:Location100027167 OR
?types = umbel-rc:PopulatedPlace)
}
""")
database.setReturnFormat(JSON)
results = database.query().convert()
for result in results["results"]["bindings"]:
return result["countOfDisambg"]["value"]
def checkLocation(input):
#userInput = raw_input("What location are you trying to check? (use ';' for multiple inputs)\n")
databaseWrapper = SPARQLWrapper("http://dbpedia.org/sparql")
if len(input) > 1:
inputArray = input.split(';') #allows multiple inputs separated by ';'
for loc in inputArray:
locationString = loc
locationString = locationString.replace("_"," ")
locationString = locationString.strip()
locationString = re.sub(' +',' ',locationString)
locationString = locationString.title()
resourceUrl = getDirectResourceUrl(locationString, databaseWrapper) #Check for direct resource
#If string has "-", try lowering the case of some names after "-"
if resourceUrl is None and '-' in locationString:
splitArray=locationString.split("-") #split the location into an array
for i in range(1,len(splitArray)):
inst=splitArray[:] #create instance
inst[i]=inst[i].lower() #lowercase i word in the array
locationStringMod = "-".join(inst) # rejoin array to a location
resourceUrl = getDirectResourceUrl(locationStringMod, databaseWrapper) #Check for direct resource
if resourceUrl is not None:
break
if resourceUrl is None:
locationString = locationString.replace(" ","_")
resourceUrl = getRedirectedResourceURL(locationString, databaseWrapper) #Check for indirect resource
locationType = isLocation(resourceUrl, databaseWrapper) #Check if string is a location
if locationType is not None:
if int(locationType)>0:
return True
else:
return False
else:
DisambCount = int(checkDisambiguates(locationString, databaseWrapper)) #Check for disambiguates
if DisambCount>0:
return True
else:
return False
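# Illustrative call (hedged; the actual result depends on the live DBpedia endpoint):
#
#     checkLocation("Paris")    # expected to return True
#     checkLocation("Asdfgh")   # expected to return False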
# Function that evaluates text files contained in the 'locs' folder.
# The script file has to be in the same directory as the 'locs' folder
def evaluation(workingDir):
print 'Running...Please wait'
locDir = os.listdir(workingDir+'\locs')
ok = 0
ko = 0
for locFile in locDir:
for locInput in open(workingDir+'\locs\\'+locFile):
try:
                locCheck = checkLocation(locInput)
if locCheck:
ok+=1
else:
ko+=1
except:
                pass
print 'Done'
print 'Locations - ',
print ok
print 'Not locations - ',
print ko
print float(ok)/(ok+ko)
evaluation(os.path.dirname(os.path.realpath(__file__))) | {
"content_hash": "0e6eca32351981d9f03e4e2c05482ba4",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 118,
"avg_line_length": 33.939189189189186,
"alnum_prop": 0.7075452916583715,
"repo_name": "andpol5/QueryParsingForInlp",
"id": "587b2278af7b22bd80f422e5a626801ce4ca5903",
"size": "5268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Exercise1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61303"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_geopostcodes', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='locality',
options={'verbose_name_plural': 'localities', 'verbose_name': 'locality'},
),
migrations.AlterField(
model_name='locality',
name='postcode',
field=models.CharField(db_index=True, verbose_name='postcode', help_text='ZIP/Postal code', max_length=15),
),
migrations.AlterField(
model_name='locality',
name='suburb',
field=models.CharField(verbose_name='suburb', help_text='Locality subdivision', max_length=80),
),
]
| {
"content_hash": "bab4199e4410d87cc39fcce01b705090",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 119,
"avg_line_length": 30.703703703703702,
"alnum_prop": 0.5958986731001207,
"repo_name": "alexhayes/django-geopostcodes",
"id": "1dffda4fdb44d38b60a6b57ce5f0c817e02a0d98",
"size": "853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_geopostcodes/migrations/0002_auto_20151021_2127.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2426"
},
{
"name": "Python",
"bytes": "26750"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import unittest
import six
from telemetry.core import os_version as os_version_module
from telemetry.story import expectations
from telemetry.testing import fakes
class MockState():
def __init__(self):
self.platform = fakes.FakePlatform()
class MockStory():
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
class MockStorySet():
def __init__(self, stories):
self._stories = stories
@property
def stories(self):
return self._stories
class MockBrowserFinderOptions():
def __init__(self):
self._browser_type = None
@property
def browser_type(self):
return self._browser_type
@browser_type.setter
def browser_type(self, t):
assert isinstance(t, six.string_types)
self._browser_type = t
class TestConditionTest(unittest.TestCase):
def setUp(self):
self._platform = fakes.FakePlatform()
self._finder_options = MockBrowserFinderOptions()
def testAllAlwaysReturnsTrue(self):
self.assertTrue(
expectations.ALL.ShouldDisable(self._platform, self._finder_options))
def testAllWinReturnsTrueOnWindows(self):
self._platform.SetOSName('win')
self.assertTrue(
expectations.ALL_WIN.ShouldDisable(self._platform,
self._finder_options))
def testAllWinReturnsFalseOnOthers(self):
self._platform.SetOSName('not_windows')
self.assertFalse(
expectations.ALL_WIN.ShouldDisable(self._platform,
self._finder_options))
def testAllLinuxReturnsTrueOnLinux(self):
self._platform.SetOSName('linux')
self.assertTrue(expectations.ALL_LINUX.ShouldDisable(self._platform,
self._finder_options))
def testAllLinuxReturnsFalseOnOthers(self):
self._platform.SetOSName('not_linux')
self.assertFalse(expectations.ALL_LINUX.ShouldDisable(self._platform,
self._finder_options))
def testAllMacReturnsTrueOnMac(self):
self._platform.SetOSName('mac')
self.assertTrue(expectations.ALL_MAC.ShouldDisable(self._platform,
self._finder_options))
def testAllMacReturnsFalseOnOthers(self):
self._platform.SetOSName('not_mac')
self.assertFalse(expectations.ALL_MAC.ShouldDisable(self._platform,
self._finder_options))
def testAllChromeOSReturnsTrueOnChromeOS(self):
self._platform.SetOSName('chromeos')
self.assertTrue(expectations.ALL_CHROMEOS.ShouldDisable(
self._platform, self._finder_options))
def testAllChromeOSReturnsFalseOnOthers(self):
self._platform.SetOSName('not_chromeos')
self.assertFalse(expectations.ALL_CHROMEOS.ShouldDisable(
self._platform, self._finder_options))
def testAllAndroidReturnsTrueOnAndroid(self):
self._platform.SetOSName('android')
self.assertTrue(
expectations.ALL_ANDROID.ShouldDisable(self._platform,
self._finder_options))
def testAllAndroidReturnsFalseOnOthers(self):
self._platform.SetOSName('not_android')
self.assertFalse(
expectations.ALL_ANDROID.ShouldDisable(self._platform,
self._finder_options))
def testAllDesktopReturnsFalseOnNonDesktop(self):
false_platforms = ['android']
for plat in false_platforms:
self._platform.SetOSName(plat)
self.assertFalse(
expectations.ALL_DESKTOP.ShouldDisable(self._platform,
self._finder_options))
def testAllDesktopReturnsTrueOnDesktop(self):
true_platforms = ['win', 'mac', 'linux', 'chromeos']
for plat in true_platforms:
self._platform.SetOSName(plat)
self.assertTrue(
expectations.ALL_DESKTOP.ShouldDisable(self._platform,
self._finder_options))
def testAllMobileReturnsFalseOnNonMobile(self):
false_platforms = ['win', 'mac', 'linux', 'chromeos']
for plat in false_platforms:
self._platform.SetOSName(plat)
self.assertFalse(
expectations.ALL_MOBILE.ShouldDisable(self._platform,
self._finder_options))
def testAllMobileReturnsTrueOnMobile(self):
true_platforms = ['android']
for plat in true_platforms:
self._platform.SetOSName(plat)
self.assertTrue(
expectations.ALL_MOBILE.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus5ReturnsFalseOnNotAndroid(self):
self._platform.SetOSName('not_android')
self.assertFalse(
expectations.ANDROID_NEXUS5.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus5XReturnsFalseOnNotAndroid(self):
self._platform.SetOSName('not_android')
self.assertFalse(
expectations.ANDROID_NEXUS5X.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus6ReturnsFalseOnNotAndroid(self):
self._platform.SetOSName('not_android')
self.assertFalse(
expectations.ANDROID_NEXUS6.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus6PReturnsFalseOnNotAndroid(self):
self._platform.SetOSName('not_android')
self.assertFalse(
expectations.ANDROID_NEXUS6P.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus7ReturnsFalseOnNotAndroid(self):
self._platform.SetOSName('not_android')
self.assertFalse(
expectations.ANDROID_NEXUS7.ShouldDisable(self._platform,
self._finder_options))
def testAndroidCherryMobileReturnsFalseOnNotAndroid(self):
self._platform.SetOSName('not_android')
self.assertFalse(
expectations.ANDROID_ONE.ShouldDisable(self._platform,
self._finder_options))
def testAndroidSvelteReturnsFalseOnNotAndroid(self):
self._platform.SetOSName('not_android')
self.assertFalse(
expectations.ANDROID_SVELTE.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus5ReturnsFalseOnAndroidNotNexus5(self):
self._platform.SetOSName('android')
self.assertFalse(
expectations.ANDROID_NEXUS5.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus5XReturnsFalseOnAndroidNotNexus5X(self):
self._platform.SetOSName('android')
self.assertFalse(
expectations.ANDROID_NEXUS5X.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus5ReturnsFalseOnAndroidNexus5X(self):
self._platform.SetOSName('android')
self._platform.SetDeviceTypeName('Nexus 5X')
self.assertFalse(
expectations.ANDROID_NEXUS5.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus6ReturnsFalseOnAndroidNotNexus6(self):
self._platform.SetOSName('android')
self.assertFalse(
expectations.ANDROID_NEXUS6.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus6ReturnsFalseOnAndroidNexus6P(self):
self._platform.SetOSName('android')
self._platform.SetDeviceTypeName('Nexus 6P')
self.assertFalse(
expectations.ANDROID_NEXUS6.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus6PReturnsFalseOnAndroidNotNexus6P(self):
self._platform.SetOSName('android')
self.assertFalse(
expectations.ANDROID_NEXUS6P.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus7ReturnsFalseOnAndroidNotNexus7(self):
self._platform.SetOSName('android')
self.assertFalse(
expectations.ANDROID_NEXUS7.ShouldDisable(self._platform,
self._finder_options))
def testAndroidCherryMobileReturnsFalseOnAndroidNotCherryMobile(self):
self._platform.SetOSName('android')
self.assertFalse(
expectations.ANDROID_ONE.ShouldDisable(self._platform,
self._finder_options))
def testAndroidSvelteReturnsFalseOnAndroidNotSvelte(self):
self._platform.SetOSName('android')
self.assertFalse(
expectations.ANDROID_SVELTE.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus5ReturnsTrueOnAndroidNexus5(self):
self._platform.SetOSName('android')
self._platform.SetDeviceTypeName('Nexus 5')
self.assertTrue(
expectations.ANDROID_NEXUS5.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus5XReturnsTrueOnAndroidNexus5X(self):
self._platform.SetOSName('android')
self._platform.SetDeviceTypeName('Nexus 5X')
self.assertTrue(
expectations.ANDROID_NEXUS5X.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus6ReturnsTrueOnAndroidNexus6(self):
self._platform.SetOSName('android')
self._platform.SetDeviceTypeName('Nexus 6')
self.assertTrue(
expectations.ANDROID_NEXUS6.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus6PReturnsTrueOnAndroidNexus6P(self):
self._platform.SetOSName('android')
self._platform.SetDeviceTypeName('Nexus 6P')
self.assertTrue(
expectations.ANDROID_NEXUS6P.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNexus7ReturnsTrueOnAndroidNexus7(self):
self._platform.SetOSName('android')
self._platform.SetDeviceTypeName('Nexus 7')
self.assertTrue(
expectations.ANDROID_NEXUS7.ShouldDisable(self._platform,
self._finder_options))
def testAndroidCherryMobileReturnsTrueOnAndroidCherryMobile(self):
self._platform.SetOSName('android')
self._platform.SetDeviceTypeName('W6210')
self.assertTrue(
expectations.ANDROID_ONE.ShouldDisable(self._platform,
self._finder_options))
def testAndroidSvelteReturnsTrueOnAndroidSvelte(self):
self._platform.SetOSName('android')
self._platform.SetIsSvelte(True)
self.assertTrue(
expectations.ANDROID_SVELTE.ShouldDisable(self._platform,
self._finder_options))
def testAndroidWebviewReturnsTrueOnAndroidWebview(self):
self._platform.SetOSName('android')
self._platform.SetIsAosp(True)
self._finder_options.browser_type = 'android-webview'
self.assertTrue(
expectations.ANDROID_WEBVIEW.ShouldDisable(self._platform,
self._finder_options))
def testAndroidWebviewReturnsTrueOnAndroidWebviewGoogle(self):
self._platform.SetOSName('android')
self._finder_options.browser_type = 'android-webview-google'
self.assertTrue(
expectations.ANDROID_WEBVIEW.ShouldDisable(self._platform,
self._finder_options))
def testAndroidWebviewReturnsFalseOnAndroidNotWebview(self):
self._platform.SetOSName('android')
self._finder_options.browser_type = 'android-chrome'
self.assertFalse(
expectations.ANDROID_WEBVIEW.ShouldDisable(self._platform,
self._finder_options))
def testAndroidWebviewReturnsFalseOnNotAndroid(self):
self._platform.SetOSName('not_android')
self.assertFalse(
expectations.ANDROID_WEBVIEW.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNotWebviewReturnsTrueOnAndroidNotWebview(self):
self._platform.SetOSName('android')
self._finder_options.browser_type = 'android'
self.assertTrue(
expectations.ANDROID_NOT_WEBVIEW.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNotWebviewReturnsFalseOnAndroidWebview(self):
self._platform.SetOSName('android')
self._finder_options.browser_type = 'android-webview'
self.assertFalse(
expectations.ANDROID_NOT_WEBVIEW.ShouldDisable(self._platform,
self._finder_options))
def testAndroidNotWebviewReturnsFalseOnNotAndroid(self):
self._platform.SetOSName('not_android')
self.assertFalse(
expectations.ANDROID_NOT_WEBVIEW.ShouldDisable(self._platform,
self._finder_options))
def testMac1011ReturnsTrueOnMac1011(self):
self._platform.SetOSName('mac')
self._platform.SetOsVersionDetailString('10.11')
self.assertTrue(
expectations.MAC_10_11.ShouldDisable(self._platform,
self._finder_options))
def testMac1011ReturnsFalseOnNotMac1011(self):
self._platform.SetOSName('mac')
self._platform.SetOsVersionDetailString('10.12')
self.assertFalse(
expectations.MAC_10_11.ShouldDisable(self._platform,
self._finder_options))
def testMac1012ReturnsTrueOnMac1012(self):
self._platform.SetOSName('mac')
self._platform.SetOsVersionDetailString('10.12')
self.assertTrue(
expectations.MAC_10_12.ShouldDisable(self._platform,
self._finder_options))
def testMac1012ReturnsFalseOnNotMac1012(self):
self._platform.SetOSName('mac')
self._platform.SetOsVersionDetailString('10.11')
self.assertFalse(
expectations.MAC_10_12.ShouldDisable(self._platform,
self._finder_options))
def testNexus5XWebviewFalseOnNotWebview(self):
self._platform.SetOSName('android')
self._finder_options.browser_type = 'android'
self._platform.SetDeviceTypeName('Nexus 5X')
self.assertFalse(
expectations.ANDROID_NEXUS5X_WEBVIEW.ShouldDisable(
self._platform, self._finder_options))
def testNexus5XWebviewFalseOnNotNexus5X(self):
self._platform.SetOSName('android')
self._finder_options.browser_type = 'android-webview'
self.assertFalse(
expectations.ANDROID_NEXUS5X_WEBVIEW.ShouldDisable(
self._platform, self._finder_options))
def testNexus5XWebviewReturnsTrue(self):
self._platform.SetOSName('android')
self._finder_options.browser_type = 'android-webview'
self._platform.SetDeviceTypeName('Nexus 5X')
self.assertTrue(
expectations.ANDROID_NEXUS5X_WEBVIEW.ShouldDisable(
self._platform, self._finder_options))
def testNexus6WebviewFalseOnNotWebview(self):
self._platform.SetOSName('android')
self._finder_options.browser_type = 'android'
self._platform.SetDeviceTypeName('Nexus 6')
self.assertFalse(
expectations.ANDROID_NEXUS6_WEBVIEW.ShouldDisable(
self._platform, self._finder_options))
def testNexus6WebviewFalseOnNotNexus6(self):
self._platform.SetOSName('android')
self._finder_options.browser_type = 'android-webview'
self._platform.SetDeviceTypeName('Nexus 5X')
self.assertFalse(
expectations.ANDROID_NEXUS6_WEBVIEW.ShouldDisable(
self._platform, self._finder_options))
def testNexus6WebviewReturnsTrue(self):
self._platform.SetOSName('android')
self._finder_options.browser_type = 'android-webview'
self._platform.SetDeviceTypeName('Nexus 6')
self.assertTrue(
expectations.ANDROID_NEXUS6_WEBVIEW.ShouldDisable(
self._platform, self._finder_options))
def testAndroidNexus6AOSP(self):
self._platform.SetOSName('android')
self._platform.SetDeviceTypeName('AOSP on Shamu')
self.assertTrue(
expectations.ANDROID_NEXUS6.ShouldDisable(
self._platform, self._finder_options))
def testAndroidNexus5XAOSP(self):
self._platform.SetOSName('android')
self._platform.SetDeviceTypeName('AOSP on BullHead')
self.assertTrue(
expectations.ANDROID_NEXUS5X.ShouldDisable(
self._platform, self._finder_options))
def testAndroidNexus6WebviewAOSP(self):
self._platform.SetOSName('android')
self._finder_options.browser_type = 'android-webview'
self._platform.SetDeviceTypeName('AOSP on Shamu')
self.assertTrue(
expectations.ANDROID_NEXUS6_WEBVIEW.ShouldDisable(
self._platform, self._finder_options))
def testAndroidNexus5XWebviewAOSP(self):
self._platform.SetOSName('android')
self._finder_options.browser_type = 'android-webview'
self._platform.SetDeviceTypeName('AOSP on BullHead')
self.assertTrue(
expectations.ANDROID_NEXUS5X_WEBVIEW.ShouldDisable(
self._platform, self._finder_options))
def testWin7(self):
self._platform.SetOSName('win')
self._platform.SetOSVersionName(os_version_module.WIN7)
self.assertTrue(
expectations.WIN_7.ShouldDisable(
self._platform, self._finder_options))
self.assertEqual('Win 7', str(expectations.WIN_7))
def testWin10(self):
self._platform.SetOSName('win')
self._platform.SetOSVersionName(os_version_module.WIN10)
self.assertTrue(
expectations.WIN_10.ShouldDisable(
self._platform, self._finder_options))
self.assertEqual('Win 10', str(expectations.WIN_10))
def testAndroidGoWebviewFalseOnNotWebview(self):
self._platform.SetOSName('android')
self._finder_options.browser_type = 'android'
self._platform.SetDeviceTypeName('gobo')
self.assertFalse(
expectations.ANDROID_GO_WEBVIEW.ShouldDisable(
self._platform, self._finder_options))
  def testAndroidGoWebviewFalseOnNotAndroidGo(self):
self._platform.SetOSName('android')
self._finder_options.browser_type = 'android-webview'
self._platform.SetDeviceTypeName('Nexus 5X')
self.assertFalse(
expectations.ANDROID_GO_WEBVIEW.ShouldDisable(
self._platform, self._finder_options))
def testAndroidGoWebviewReturnsTrue(self):
self._platform.SetOSName('android')
self._finder_options.browser_type = 'android-webview'
self._platform.SetDeviceTypeName('gobo')
self.assertTrue(
expectations.ANDROID_GO_WEBVIEW.ShouldDisable(
self._platform, self._finder_options))
| {
"content_hash": "f3f0b88bab4443028bee9b2092c787eb",
"timestamp": "",
"source": "github",
"line_count": 479,
"max_line_length": 80,
"avg_line_length": 39.95824634655532,
"alnum_prop": 0.6508359456635319,
"repo_name": "catapult-project/catapult",
"id": "9d782ef4a6af9981976b5f398501a7d05dca6dcc",
"size": "19303",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "telemetry/telemetry/story/expectations_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
from c7n.resources.elasticache import _cluster_eligible_for_snapshot
from .common import BaseTest
class TestElastiCacheCluster(BaseTest):
def test_eligibility_snapshot(self):
        # so much for black-box testing, due to use of a private interface.
self.assertTrue(
_cluster_eligible_for_snapshot(
{'Engine': 'redis', 'CacheNodeType': 'cache.t2.medium'}))
self.assertFalse(
_cluster_eligible_for_snapshot(
{'Engine': 'redis', 'CacheNodeType': 'cache.t1.medium'}))
self.assertFalse(
_cluster_eligible_for_snapshot(
{'Engine': 'memcached', 'CacheNodeType': 'cache.t2.medium'}))
def test_elasticache_security_group(self):
session_factory = self.replay_flight_data("test_elasticache_security_group")
p = self.load_policy(
{
"name": "elasticache-cluster-simple",
"resource": "cache-cluster",
"filters": [
{"type": "security-group", "key": "GroupName", "value": "default"}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
self.assertEqual(
sorted([r["CacheClusterId"] for r in resources]),
["myec-001", "myec-002", "myec-003"],
)
def test_elasticache_subnet_filter(self):
session_factory = self.replay_flight_data(
"test_elasticache_subnet_group_filter"
)
p = self.load_policy(
{
"name": "elasticache-cluster-simple",
"resource": "cache-cluster",
"filters": [
{"type": "subnet", "key": "MapPublicIpOnLaunch", "value": False}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
self.assertEqual(
sorted([r["CacheClusterId"] for r in resources]),
["myec-001", "myec-002", "myec-003"],
)
def test_elasticache_cluster_simple(self):
session_factory = self.replay_flight_data("test_elasticache_cluster_simple")
p = self.load_policy(
{"name": "elasticache-cluster-simple", "resource": "cache-cluster"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
def test_elasticache_cluster_simple_filter(self):
session_factory = self.replay_flight_data("test_elasticache_cluster_simple")
p = self.load_policy(
{
"name": "elasticache-cluster-simple-filter",
"resource": "cache-cluster",
"filters": [{"type": "value", "key": "Engine", "value": "redis"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
def test_elasticache_sharded_snapshot_copy_tags(self):
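        # Verify copy-cluster-tags copies the whitelisted cluster tags onto a
        # snapshot that initially carries only the "App" tag.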
factory = self.replay_flight_data("test_elasticache_sharded_copy_cluster_tags")
client = factory().client("elasticache")
snap_tags = {
t["Key"]: t["Value"]
for t in client.list_tags_for_resource(
ResourceName="arn:aws:elasticache:us-east-2:644160558196:snapshot:zero-bytes"
)[
"TagList"
]
}
self.assertEqual(snap_tags, {"App": "MegaCache"})
p = self.load_policy(
{
"name": "test-copy-cluster-tags",
"resource": "cache-snapshot",
"actions": [
{
"type": "copy-cluster-tags",
"tags": ["App", "Env", "Zone", "Color"],
}
],
},
config=dict(region="us-east-2"),
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["SnapshotName"], "zero-bytes")
arn = p.resource_manager.get_arns(resources)[0]
snap_tags = {
t["Key"]: t["Value"]
for t in client.list_tags_for_resource(ResourceName=arn)["TagList"]
}
self.assertEqual(
snap_tags, {"App": "MegaCache", "Color": "Blue", "Env": "Dev", "Zone": "12"}
)
def test_elasticache_snapshot_copy_cluster_tags(self):
session_factory = self.replay_flight_data("test_elasticache_copy_cluster_tags")
client = session_factory().client("elasticache")
results = client.list_tags_for_resource(
ResourceName="arn:aws:elasticache:us-east-1:644160558196:snapshot:myec-backup"
)[
"TagList"
]
tags = {t["Key"]: t["Value"] for t in results}
self.assertEqual(tags, {})
policy = self.load_policy(
{
"name": "test-copy-cluster-tags",
"resource": "cache-snapshot",
"actions": [{"type": "copy-cluster-tags", "tags": ["tagkey"]}],
},
config=dict(region="us-east-1"),
session_factory=session_factory,
)
resources = policy.run()
arn = policy.resource_manager.generate_arn(resources[0]["SnapshotName"])
results = client.list_tags_for_resource(ResourceName=arn)["TagList"]
tags = {t["Key"]: t["Value"] for t in results}
self.assertEqual(tags["tagkey"], "tagval")
def test_elasticache_cluster_available(self):
session_factory = self.replay_flight_data("test_elasticache_cluster_available")
p = self.load_policy(
{
"name": "elasticache-cluster-available",
"resource": "cache-cluster",
"filters": [
{"type": "value", "key": "CacheClusterStatus", "value": "available"}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
self.assertEqual(resources[0]["CacheClusterStatus"], "available")
def test_elasticache_cluster_mark(self):
session_factory = self.replay_flight_data("test_elasticache_cluster_mark")
client = session_factory().client("elasticache")
p = self.load_policy(
{
"name": "elasticache-cluster-mark",
"resource": "cache-cluster",
"filters": [{"type": "value", "key": "Engine", "value": "redis"}],
"actions": [{"type": "mark-for-op", "days": 30, "op": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
arn = p.resource_manager.generate_arn(resources[0]["CacheClusterId"])
tags = client.list_tags_for_resource(ResourceName=arn)
tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
self.assertTrue("maid_status" in tag_map)
def test_elasticache_cluster_unmark(self):
session_factory = self.replay_flight_data("test_elasticache_cluster_unmark")
client = session_factory().client("elasticache")
p = self.load_policy(
{
"name": "elasticache-cluster-unmark",
"resource": "cache-cluster",
"filters": [{"type": "value", "key": "Engine", "value": "redis"}],
"actions": [{"type": "unmark"}],
},
session_factory=session_factory,
)
resources = p.run()
arn = p.resource_manager.generate_arn(resources[0]["CacheClusterId"])
self.assertEqual(len(resources), 3)
tags = client.list_tags_for_resource(ResourceName=arn)
self.assertFalse("maid_status" in tags)
def test_elasticache_cluster_delete(self):
session_factory = self.replay_flight_data("test_elasticache_cluster_delete")
p = self.load_policy(
{
"name": "elasticache-cluster-delete",
"resource": "cache-cluster",
"filters": [{"type": "value", "key": "Engine", "value": "redis"}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
def test_elasticache_cluster_snapshot(self):
session_factory = self.replay_flight_data("test_elasticache_cluster_snapshot")
p = self.load_policy(
{
"name": "elasticache-cluster-snapshot",
"resource": "cache-cluster",
"actions": [{"type": "snapshot"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
class TestElastiCacheSubnetGroup(BaseTest):
def test_elasticache_subnet_group(self):
session_factory = self.replay_flight_data("test_elasticache_subnet_group")
p = self.load_policy(
{"name": "elasticache-subnet-group", "resource": "cache-subnet-group"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
class TestElastiCacheSnapshot(BaseTest):
def test_elasticache_snapshot(self):
session_factory = self.replay_flight_data("test_elasticache_snapshot")
p = self.load_policy(
{"name": "elasticache-snapshot", "resource": "cache-snapshot"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 4)
def test_elasticache_snapshot_age_filter(self):
factory = self.replay_flight_data("test_elasticache_snapshot")
p = self.load_policy(
{
"name": "elasticache-snapshot-age-filter",
"resource": "cache-snapshot",
"filters": [{"type": "age", "days": 2, "op": "gt"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 4)
def test_elasticache_snapshot_mark(self):
session_factory = self.replay_flight_data("test_elasticache_snapshot_mark")
client = session_factory().client("elasticache")
p = self.load_policy(
{
"name": "elasticache-snapshot-mark",
"resource": "cache-snapshot",
"filters": [
{
"type": "value",
"key": "SnapshotName",
"value": "backup-myec-001-2017-06-23",
}
],
"actions": [{"type": "mark-for-op", "days": 30, "op": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
arn = p.resource_manager.generate_arn(resources[0]["SnapshotName"])
self.assertEqual(len(resources), 1)
tags = client.list_tags_for_resource(ResourceName=arn)
tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
self.assertTrue("maid_status" in tag_map)
def test_elasticache_snapshot_unmark(self):
session_factory = self.replay_flight_data("test_elasticache_snapshot_unmark")
client = session_factory().client("elasticache")
p = self.load_policy(
{
"name": "elasticache-snapshot-unmark",
"resource": "cache-snapshot",
"filters": [
{
"type": "value",
"key": "SnapshotName",
"value": "backup-myec-001-2017-06-23",
}
],
"actions": [{"type": "unmark"}],
},
session_factory=session_factory,
)
resources = p.run()
arn = p.resource_manager.generate_arn(resources[0]["SnapshotName"])
self.assertEqual(len(resources), 1)
tags = client.list_tags_for_resource(ResourceName=arn)
self.assertFalse("maid_status" in tags)
def test_elasticache_snapshot_delete(self):
factory = self.replay_flight_data("test_elasticache_snapshot_delete")
p = self.load_policy(
{
"name": "elasticache-snapshot-delete",
"resource": "cache-snapshot",
"actions": ["delete"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 4)
class TestModifyVpcSecurityGroupsAction(BaseTest):
def test_elasticache_remove_matched_security_groups(self):
#
# Test conditions:
# - running 2 Elasticache replication group in default VPC with 3 clusters
# - translates to 6 clusters
# - a default security group with id 'sg-7a3fcb13' exists
# - security group named PROD-ONLY-Test-Security-Group exists in VPC and is attached to
# one replication group
# - translates to 3 clusters marked non-compliant
#
# Results in 6 clusters with default Security Group attached
session_factory = self.replay_flight_data(
"test_elasticache_remove_matched_security_groups"
)
client = session_factory().client("elasticache", region_name="ca-central-1")
p = self.load_policy(
{
"name": "elasticache-remove-matched-security-groups",
"resource": "cache-cluster",
"filters": [
{
"type": "security-group",
"key": "GroupName",
"value": "(.*PROD-ONLY.*)",
"op": "regex",
}
],
"actions": [
{
"type": "modify-security-groups",
"remove": "matched",
"isolation-group": "sg-7a3fcb13",
}
],
},
session_factory=session_factory,
)
clean_p = self.load_policy(
{
"name": "elasticache-verifyremove-matched-security-groups",
"resource": "cache-cluster",
"filters": [
{"type": "security-group", "key": "GroupName", "value": "default"}
],
},
session_factory=session_factory,
)
resources = p.run()
waiter = client.get_waiter("replication_group_available")
waiter.wait()
clean_resources = clean_p.run()
# clusters autoscale across AZs, so they get -001, -002, etc appended
self.assertIn("sg-test-base", resources[0]["CacheClusterId"])
self.assertEqual(len(resources), 3)
self.assertEqual(len(resources[0]["SecurityGroups"]), 1)
# show that it was indeed a replacement of security groups
self.assertEqual(len(clean_resources[0]["SecurityGroups"]), 1)
self.assertEqual(len(clean_resources), 6)
def test_elasticache_add_security_group(self):
# Test conditions:
# - running Elasticache replication group in default VPC with 3 clusters
# - a default security group with id 'sg-7a3fcb13' exists
# - security group named PROD-ONLY-Test-Security-Group exists in VPC and is not attached
# - translates to 3 clusters marked to get new group attached
#
# Results in 3 clusters with default Security Group and PROD-ONLY-Test-Security-Group
session_factory = self.replay_flight_data("test_elasticache_add_security_group")
client = session_factory().client("elasticache", region_name="ca-central-1")
p = self.load_policy(
{
"name": "add-sg-to-prod-elasticache",
"resource": "cache-cluster",
"filters": [
{"type": "security-group", "key": "GroupName", "value": "default"}
],
"actions": [{"type": "modify-security-groups", "add": "sg-6360920a"}],
},
session_factory=session_factory,
)
clean_p = self.load_policy(
{
"name": "validate-add-sg-to-prod-elasticache",
"resource": "cache-cluster",
"filters": [
{"type": "security-group", "key": "GroupName", "value": "default"},
{
"type": "security-group",
"key": "GroupName",
"value": "PROD-ONLY-Test-Security-Group",
},
],
},
session_factory=session_factory,
)
resources = p.run()
waiter = client.get_waiter("replication_group_available")
waiter.wait()
clean_resources = clean_p.run()
self.assertEqual(len(resources), 3)
self.assertIn("sg-test-base", resources[0]["CacheClusterId"])
self.assertEqual(len(resources[0]["SecurityGroups"]), 1)
self.assertEqual(len(clean_resources[0]["SecurityGroups"]), 2)
self.assertEqual(len(clean_resources), 3)
class TestElastiCacheReplicationGroup(BaseTest):
def test_elasticache_replication_group(self):
session_factory = self.replay_flight_data("test_elasticache_replication_group")
p = self.load_policy(
{"name": "elasticache-rg", "resource": "elasticache-group"},
session_factory=session_factory,)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['ReplicationGroupId'], 'test-c7n-rg')
def test_elasticache_replication_group_delete(self):
session_factory = self.replay_flight_data("test_elasticache_replication_group_delete")
p = self.load_policy(
{
"name": "replication-group-enc-delete",
"resource": "elasticache-group",
"filters": [{"type": "value", "key": "AtRestEncryptionEnabled", "value": False}],
"actions": [{"type": "delete", "snapshot": True}],
},
session_factory=session_factory,)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['ReplicationGroupId'], 'c7n-delete')
client = session_factory().client("elasticache")
response = client.describe_replication_groups(ReplicationGroupId='c7n-delete')
self.assertEqual(response.get('ReplicationGroups')[0].get('Status'), 'deleting')
| {
"content_hash": "f2913cc2e0f545ecd62e971e172b2550",
"timestamp": "",
"source": "github",
"line_count": 474,
"max_line_length": 99,
"avg_line_length": 39.71729957805907,
"alnum_prop": 0.537873154148518,
"repo_name": "capitalone/cloud-custodian",
"id": "49bc3924e40449fc1fe30e7eca9b6b6707d77b69",
"size": "18953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_elasticache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2190"
},
{
"name": "Go",
"bytes": "135995"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9378"
},
{
"name": "Python",
"bytes": "3693572"
},
{
"name": "Shell",
"bytes": "2294"
}
],
"symlink_target": ""
} |
import sys
from keystoneclient.common import cms
from oslo_log import log
from oslo_log import versionutils
from oslo_serialization import jsonutils
from oslo_utils import importutils
import six
import stevedore
from keystone.common import controller
from keystone.common import dependency
from keystone.common import utils
from keystone.common import wsgi
import keystone.conf
from keystone import exception
from keystone.federation import constants
from keystone.i18n import _, _LI, _LW
from keystone.resource import controllers as resource_controllers
LOG = log.getLogger(__name__)
CONF = keystone.conf.CONF
# registry of authentication methods
AUTH_METHODS = {}
AUTH_PLUGINS_LOADED = False
def load_auth_method(method):
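    # Resolve the plugin name configured for this auth method and load it via
    # its stevedore entry point; fall back to a deprecated direct import if
    # stevedore cannot load it.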
plugin_name = CONF.auth.get(method) or 'default'
namespace = 'keystone.auth.%s' % method
try:
driver_manager = stevedore.DriverManager(namespace, plugin_name,
invoke_on_load=True)
return driver_manager.driver
except RuntimeError:
LOG.debug('Failed to load the %s driver (%s) using stevedore, will '
'attempt to load using import_object instead.',
method, plugin_name)
driver = importutils.import_object(plugin_name)
msg = (_(
'Direct import of auth plugin %(name)r is deprecated as of Liberty in '
'favor of its entrypoint from %(namespace)r and may be removed in '
'N.') %
{'name': plugin_name, 'namespace': namespace})
versionutils.report_deprecated_feature(LOG, msg)
return driver
def load_auth_methods():
global AUTH_PLUGINS_LOADED
if AUTH_PLUGINS_LOADED:
# Only try and load methods a single time.
return
# config.setup_authentication should be idempotent, call it to ensure we
# have setup all the appropriate configuration options we may need.
keystone.conf.auth.setup_authentication()
for plugin in set(CONF.auth.methods):
AUTH_METHODS[plugin] = load_auth_method(plugin)
AUTH_PLUGINS_LOADED = True
def get_auth_method(method_name):
global AUTH_METHODS
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
return AUTH_METHODS[method_name]
class AuthContext(dict):
"""Retrofitting auth_context to reconcile identity attributes.
The identity attributes must not have conflicting values among the
auth plug-ins. The only exception is `expires_at`, which is set to its
earliest value.
"""
# identity attributes need to be reconciled among the auth plugins
IDENTITY_ATTRIBUTES = frozenset(['user_id', 'project_id',
'access_token_id', 'domain_id',
'expires_at'])
def __setitem__(self, key, val):
"""Override __setitem__ to prevent conflicting values."""
if key in self.IDENTITY_ATTRIBUTES and key in self:
existing_val = self[key]
if key == 'expires_at':
# special treatment for 'expires_at', we are going to take
# the earliest expiration instead.
if existing_val != val:
LOG.info(_LI('"expires_at" has conflicting values '
'%(existing)s and %(new)s. Will use the '
'earliest value.'),
{'existing': existing_val, 'new': val})
if existing_val is None or val is None:
val = existing_val or val
else:
val = min(existing_val, val)
elif existing_val != val:
msg = _('Unable to reconcile identity attribute %(attribute)s '
'as it has conflicting values %(new)s and %(old)s') % (
{'attribute': key,
'new': val,
'old': existing_val})
raise exception.Unauthorized(msg)
return super(AuthContext, self).__setitem__(key, val)
@dependency.requires('resource_api', 'trust_api')
class AuthInfo(object):
"""Encapsulation of "auth" request."""
@staticmethod
def create(auth=None, scope_only=False):
auth_info = AuthInfo(auth=auth)
auth_info._validate_and_normalize_auth_data(scope_only)
return auth_info
def __init__(self, auth=None):
self.auth = auth
self._scope_data = (None, None, None, None)
# self._scope_data is (domain_id, project_id, trust_ref, unscoped)
# project scope: (None, project_id, None, None)
# domain scope: (domain_id, None, None, None)
# trust scope: (None, None, trust_ref, None)
# unscoped: (None, None, None, 'unscoped')
def _assert_project_is_enabled(self, project_ref):
# ensure the project is enabled
try:
self.resource_api.assert_project_enabled(
project_id=project_ref['id'],
project=project_ref)
except AssertionError as e:
LOG.warning(six.text_type(e))
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
def _assert_domain_is_enabled(self, domain_ref):
try:
self.resource_api.assert_domain_enabled(
domain_id=domain_ref['id'],
domain=domain_ref)
except AssertionError as e:
LOG.warning(six.text_type(e))
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
def _lookup_domain(self, domain_info):
if isinstance(domain_info, dict) is False:
raise exception.ValidationError(attribute='dict',
target='domain')
domain_id = domain_info.get('id')
domain_name = domain_info.get('name')
domain_ref = None
if not domain_id and not domain_name:
raise exception.ValidationError(attribute='id or name',
target='domain')
try:
if domain_name:
if (CONF.resource.domain_name_url_safe == 'strict' and
utils.is_not_url_safe(domain_name)):
msg = _('Domain name cannot contain reserved characters.')
raise exception.Unauthorized(message=msg)
domain_ref = self.resource_api.get_domain_by_name(
domain_name)
else:
domain_ref = self.resource_api.get_domain(domain_id)
except exception.DomainNotFound as e:
LOG.warning(six.text_type(e))
raise exception.Unauthorized(e)
self._assert_domain_is_enabled(domain_ref)
return domain_ref
def _lookup_project(self, project_info):
if isinstance(project_info, dict) is False:
raise exception.ValidationError(attribute='dict',
target='project')
project_id = project_info.get('id')
project_name = project_info.get('name')
project_ref = None
if not project_id and not project_name:
raise exception.ValidationError(attribute='id or name',
target='project')
try:
if project_name:
if (CONF.resource.project_name_url_safe == 'strict' and
utils.is_not_url_safe(project_name)):
msg = _('Project name cannot contain reserved characters.')
raise exception.Unauthorized(message=msg)
if 'domain' not in project_info:
raise exception.ValidationError(attribute='domain',
target='project')
domain_ref = self._lookup_domain(project_info['domain'])
project_ref = self.resource_api.get_project_by_name(
project_name, domain_ref['id'])
else:
project_ref = self.resource_api.get_project(project_id)
# NOTE(morganfainberg): The _lookup_domain method will raise
# exception.Unauthorized if the domain isn't found or is
# disabled.
self._lookup_domain({'id': project_ref['domain_id']})
except exception.ProjectNotFound as e:
raise exception.Unauthorized(e)
self._assert_project_is_enabled(project_ref)
return project_ref
def _lookup_trust(self, trust_info):
trust_id = trust_info.get('id')
if not trust_id:
raise exception.ValidationError(attribute='trust_id',
target='trust')
trust = self.trust_api.get_trust(trust_id)
return trust
def _validate_and_normalize_scope_data(self):
"""Validate and normalize scope data."""
if 'scope' not in self.auth:
return
if sum(['project' in self.auth['scope'],
'domain' in self.auth['scope'],
'unscoped' in self.auth['scope'],
'OS-TRUST:trust' in self.auth['scope']]) != 1:
raise exception.ValidationError(
attribute='project, domain, OS-TRUST:trust or unscoped',
target='scope')
if 'unscoped' in self.auth['scope']:
self._scope_data = (None, None, None, 'unscoped')
return
if 'project' in self.auth['scope']:
project_ref = self._lookup_project(self.auth['scope']['project'])
self._scope_data = (None, project_ref['id'], None, None)
elif 'domain' in self.auth['scope']:
domain_ref = self._lookup_domain(self.auth['scope']['domain'])
self._scope_data = (domain_ref['id'], None, None, None)
elif 'OS-TRUST:trust' in self.auth['scope']:
if not CONF.trust.enabled:
raise exception.Forbidden('Trusts are disabled.')
trust_ref = self._lookup_trust(
self.auth['scope']['OS-TRUST:trust'])
# TODO(ayoung): when trusts support domains, fill in domain data
if trust_ref.get('project_id') is not None:
project_ref = self._lookup_project(
{'id': trust_ref['project_id']})
self._scope_data = (None, project_ref['id'], trust_ref, None)
else:
self._scope_data = (None, None, trust_ref, None)
def _validate_auth_methods(self):
if 'identity' not in self.auth:
raise exception.ValidationError(attribute='identity',
target='auth')
# make sure auth methods are provided
if 'methods' not in self.auth['identity']:
raise exception.ValidationError(attribute='methods',
target='identity')
# make sure all the method data/payload are provided
for method_name in self.get_method_names():
if method_name not in self.auth['identity']:
raise exception.ValidationError(attribute=method_name,
target='identity')
# make sure auth method is supported
for method_name in self.get_method_names():
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
def _validate_and_normalize_auth_data(self, scope_only=False):
"""Make sure "auth" is valid.
:param scope_only: If it is True, auth methods will not be
validated but only the scope data.
:type scope_only: boolean
"""
# make sure "auth" exist
if not self.auth:
raise exception.ValidationError(attribute='auth',
target='request body')
# NOTE(chioleong): Tokenless auth does not provide auth methods,
# we only care about using this method to validate the scope
# information. Therefore, validating the auth methods here is
# insignificant and we can skip it when scope_only is set to
# true.
if scope_only is False:
self._validate_auth_methods()
self._validate_and_normalize_scope_data()
def get_method_names(self):
"""Return the identity method names.
:returns: list of auth method names
"""
# Sanitizes methods received in request's body
# Filters out duplicates, while keeping elements' order.
method_names = []
for method in self.auth['identity']['methods']:
if method not in method_names:
method_names.append(method)
return method_names
def get_method_data(self, method):
"""Get the auth method payload.
:returns: auth method payload
"""
if method not in self.auth['identity']['methods']:
raise exception.ValidationError(attribute=method,
target='identity')
return self.auth['identity'][method]
def get_scope(self):
"""Get scope information.
Verify and return the scoping information.
:returns: (domain_id, project_id, trust_ref, unscoped).
If scope to a project, (None, project_id, None, None)
will be returned.
If scoped to a domain, (domain_id, None, None, None)
will be returned.
If scoped to a trust, (None, project_id, trust_ref, None),
Will be returned, where the project_id comes from the
trust definition.
If unscoped, (None, None, None, 'unscoped') will be
returned.
"""
return self._scope_data
def set_scope(self, domain_id=None, project_id=None, trust=None,
unscoped=None):
"""Set scope information."""
if domain_id and project_id:
msg = _('Scoping to both domain and project is not allowed')
raise ValueError(msg)
if domain_id and trust:
msg = _('Scoping to both domain and trust is not allowed')
raise ValueError(msg)
if project_id and trust:
msg = _('Scoping to both project and trust is not allowed')
raise ValueError(msg)
self._scope_data = (domain_id, project_id, trust, unscoped)
@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
'resource_api', 'token_provider_api', 'trust_api')
class Auth(controller.V3Controller):
# Note(atiwari): From V3 auth controller code we are
# calling protection() wrappers, so we need to setup
# the member_name and collection_name attributes of
# auth controller code.
# In the absence of these attributes, default 'entity'
# string will be used to represent the target which is
# generic. Policy can be defined using 'entity' but it
# would not reflect the exact entity that is in context.
# We are defining collection_name = 'tokens' and
# member_name = 'token' to facilitate policy decisions.
collection_name = 'tokens'
member_name = 'token'
def __init__(self, *args, **kw):
super(Auth, self).__init__(*args, **kw)
keystone.conf.auth.setup_authentication()
def authenticate_for_token(self, request, auth=None):
"""Authenticate user and issue a token."""
include_catalog = 'nocatalog' not in request.params
try:
auth_info = AuthInfo.create(auth=auth)
auth_context = AuthContext(extras={},
method_names=[],
bind={})
self.authenticate(request, auth_info, auth_context)
if auth_context.get('access_token_id'):
auth_info.set_scope(None, auth_context['project_id'], None)
self._check_and_set_default_scoping(auth_info, auth_context)
(domain_id, project_id, trust, unscoped) = auth_info.get_scope()
method_names = auth_info.get_method_names()
method_names += auth_context.get('method_names', [])
# make sure the list is unique
method_names = list(set(method_names))
expires_at = auth_context.get('expires_at')
# NOTE(morganfainberg): define this here so it is clear what the
# argument is during the issue_v3_token provider call.
metadata_ref = None
token_audit_id = auth_context.get('audit_id')
is_domain = auth_context.get('is_domain')
(token_id, token_data) = self.token_provider_api.issue_v3_token(
auth_context['user_id'], method_names, expires_at, project_id,
is_domain, domain_id, auth_context, trust, metadata_ref,
include_catalog, parent_audit_id=token_audit_id)
# NOTE(wanghong): We consume a trust use only when we are using
# trusts and have successfully issued a token.
if trust:
self.trust_api.consume_use(trust['id'])
return render_token_data_response(token_id, token_data,
created=True)
except exception.TrustNotFound as e:
raise exception.Unauthorized(e)
def _check_and_set_default_scoping(self, auth_info, auth_context):
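        # If the request carried no explicit scope, try to scope the token to
        # the user's default project, but only when that project and its domain
        # are enabled and the user has a role assignment there; otherwise the
        # token is issued unscoped.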
(domain_id, project_id, trust, unscoped) = auth_info.get_scope()
if trust:
project_id = trust['project_id']
if domain_id or project_id or trust:
# scope is specified
return
# Skip scoping when unscoped federated token is being issued
if constants.IDENTITY_PROVIDER in auth_context:
return
# Do not scope if request is for explicitly unscoped token
if unscoped is not None:
return
# fill in default_project_id if it is available
try:
user_ref = self.identity_api.get_user(auth_context['user_id'])
except exception.UserNotFound as e:
LOG.warning(six.text_type(e))
raise exception.Unauthorized(e)
default_project_id = user_ref.get('default_project_id')
if not default_project_id:
            # User has no default project; they will get an unscoped token.
return
# make sure user's default project is legit before scoping to it
try:
default_project_ref = self.resource_api.get_project(
default_project_id)
default_project_domain_ref = self.resource_api.get_domain(
default_project_ref['domain_id'])
if (default_project_ref.get('enabled', True) and
default_project_domain_ref.get('enabled', True)):
if self.assignment_api.get_roles_for_user_and_project(
user_ref['id'], default_project_id):
auth_info.set_scope(project_id=default_project_id)
else:
msg = _LW("User %(user_id)s doesn't have access to"
" default project %(project_id)s. The token"
" will be unscoped rather than scoped to the"
" project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
else:
msg = _LW("User %(user_id)s's default project %(project_id)s"
" is disabled. The token will be unscoped rather"
" than scoped to the project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
except (exception.ProjectNotFound, exception.DomainNotFound):
# default project or default project domain doesn't exist,
# will issue unscoped token instead
msg = _LW("User %(user_id)s's default project %(project_id)s not"
" found. The token will be unscoped rather than"
" scoped to the project.")
LOG.warning(msg, {'user_id': user_ref['id'],
'project_id': default_project_id})
def authenticate(self, request, auth_info, auth_context):
"""Authenticate user."""
# The 'external' method allows any 'REMOTE_USER' based authentication
# In some cases the server can set REMOTE_USER as '' instead of
# dropping it, so this must be filtered out
if request.remote_user:
try:
external = get_auth_method('external')
external.authenticate(request,
auth_info,
auth_context)
except exception.AuthMethodNotSupported:
# This will happen there is no 'external' plugin registered
# and the container is performing authentication.
# The 'kerberos' and 'saml' methods will be used this way.
# In those cases, it is correct to not register an
# 'external' plugin; if there is both an 'external' and a
# 'kerberos' plugin, it would run the check on identity twice.
LOG.debug("No 'external' plugin is registered.")
except exception.Unauthorized:
# If external fails then continue and attempt to determine
# user identity using remaining auth methods
LOG.debug("Authorization failed for 'external' auth method.")
# need to aggregate the results in case two or more methods
# are specified
auth_response = {'methods': []}
for method_name in auth_info.get_method_names():
method = get_auth_method(method_name)
resp = method.authenticate(request,
auth_info.get_method_data(method_name),
auth_context)
if resp:
auth_response['methods'].append(method_name)
auth_response[method_name] = resp
if auth_response["methods"]:
# authentication continuation required
raise exception.AdditionalAuthRequired(auth_response)
if 'user_id' not in auth_context:
msg = _('User not found')
raise exception.Unauthorized(msg)
@controller.protected()
def check_token(self, request):
token_id = request.context_dict.get('subject_token_id')
token_data = self.token_provider_api.validate_v3_token(
token_id)
# NOTE(morganfainberg): The code in
# ``keystone.common.wsgi.render_response`` will remove the content
# body.
return render_token_data_response(token_id, token_data)
@controller.protected()
def revoke_token(self, request):
token_id = request.context_dict.get('subject_token_id')
return self.token_provider_api.revoke_token(token_id)
@controller.protected()
def validate_token(self, request):
token_id = request.context_dict.get('subject_token_id')
include_catalog = 'nocatalog' not in request.params
token_data = self.token_provider_api.validate_v3_token(
token_id)
if not include_catalog and 'catalog' in token_data['token']:
del token_data['token']['catalog']
return render_token_data_response(token_id, token_data)
@controller.protected()
def revocation_list(self, request, auth=None):
if not CONF.token.revoke_by_id:
raise exception.Gone()
audit_id_only = 'audit_id_only' in request.params
tokens = self.token_provider_api.list_revoked_tokens()
for t in tokens:
expires = t['expires']
if not (expires and isinstance(expires, six.text_type)):
t['expires'] = utils.isotime(expires)
if audit_id_only:
t.pop('id', None)
data = {'revoked': tokens}
if audit_id_only:
# No need to obfuscate if no token IDs.
return data
json_data = jsonutils.dumps(data)
signed_text = cms.cms_sign_text(json_data,
CONF.signing.certfile,
CONF.signing.keyfile)
return {'signed': signed_text}
def _combine_lists_uniquely(self, a, b):
# it's most likely that only one of these will be filled so avoid
# the combination if possible.
if a and b:
return {x['id']: x for x in a + b}.values()
else:
return a or b
@controller.protected()
def get_auth_projects(self, request):
user_id = request.auth_context.get('user_id')
group_ids = request.auth_context.get('group_ids')
user_refs = []
if user_id:
try:
user_refs = self.assignment_api.list_projects_for_user(user_id)
except exception.UserNotFound: # nosec
# federated users have an id but they don't link to anything
pass
grp_refs = []
if group_ids:
grp_refs = self.assignment_api.list_projects_for_groups(group_ids)
refs = self._combine_lists_uniquely(user_refs, grp_refs)
return resource_controllers.ProjectV3.wrap_collection(
request.context_dict, refs)
@controller.protected()
def get_auth_domains(self, request):
user_id = request.auth_context.get('user_id')
group_ids = request.auth_context.get('group_ids')
user_refs = []
if user_id:
try:
user_refs = self.assignment_api.list_domains_for_user(user_id)
except exception.UserNotFound: # nosec
# federated users have an id but they don't link to anything
pass
grp_refs = []
if group_ids:
grp_refs = self.assignment_api.list_domains_for_groups(group_ids)
refs = self._combine_lists_uniquely(user_refs, grp_refs)
return resource_controllers.DomainV3.wrap_collection(
request.context_dict, refs)
@controller.protected()
def get_auth_catalog(self, request):
user_id = request.auth_context.get('user_id')
project_id = request.auth_context.get('project_id')
if not project_id:
raise exception.Forbidden(
_('A project-scoped token is required to produce a service '
'catalog.'))
# The V3Controller base methods mostly assume that you're returning
# either a collection or a single element from a collection, neither of
# which apply to the catalog. Because this is a special case, this
# re-implements a tiny bit of work done by the base controller (such as
# self-referential link building) to avoid overriding or refactoring
# several private methods.
return {
'catalog': self.catalog_api.get_v3_catalog(user_id, project_id),
'links': {'self': self.base_url(request.context_dict,
path='auth/catalog')}
}
# FIXME(gyee): not sure if it belongs here or keystone.common. Park it here
# for now.
def render_token_data_response(token_id, token_data, created=False):
"""Render token data HTTP response.
Stash token ID into the X-Subject-Token header.
"""
headers = [('X-Subject-Token', token_id)]
if created:
status = (201, 'Created')
else:
status = (200, 'OK')
return wsgi.render_response(body=token_data,
status=status, headers=headers)
| {
"content_hash": "fc5af1563eab63e131168a6772a0a320",
"timestamp": "",
"source": "github",
"line_count": 669,
"max_line_length": 79,
"avg_line_length": 41.92974588938714,
"alnum_prop": 0.5708530890164344,
"repo_name": "cernops/keystone",
"id": "6d32ad63817ab4f9f87b377e6055de85ea34ffbf",
"size": "28637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystone/auth/controllers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "4691908"
}
],
"symlink_target": ""
} |
import argparse
parser = argparse.ArgumentParser(description='ARAE for 1Bword dataset')
# Path Arguments
parser.add_argument('--data_path', type=str, required=True,
help='location of the data corpus')
parser.add_argument('--kenlm_path', type=str, default='./kenlm',
help='path to kenlm directory')
parser.add_argument('--save', type=str, default='oneb_example',
help='output directory name')
# Data Processing Arguments
parser.add_argument('--maxlen', type=int, default=20,
help='maximum length')
parser.add_argument('--vocab_size', type=int, default=30000,
help='cut vocabulary down to this size '
'(most frequently seen words in train)')
parser.add_argument('--lowercase', dest='lowercase', action='store_true',
help='lowercase all text')
parser.add_argument('--no-lowercase', dest='lowercase', action='store_false',
                    help='do not lowercase all text')
parser.set_defaults(lowercase=True)
# Model Arguments
parser.add_argument('--emsize', type=int, default=500,
help='size of word embeddings')
parser.add_argument('--nhidden', type=int, default=500,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=1,
help='number of layers')
parser.add_argument('--noise_r', type=float, default=0.1,
help='stdev of noise for autoencoder (regularizer)')
parser.add_argument('--noise_anneal', type=float, default=0.9995,
help='anneal noise_r exponentially by this'
'every 100 iterations')
parser.add_argument('--hidden_init', action='store_true',
help="initialize decoder hidden state with encoder's")
parser.add_argument('--arch_g', type=str, default='500-500',
help='generator architecture (MLP)')
parser.add_argument('--arch_d', type=str, default='500-500',
help='critic/discriminator architecture (MLP)')
parser.add_argument('--z_size', type=int, default=100,
help='dimension of random noise z to feed into generator')
parser.add_argument('--dropout', type=float, default=0.0,
help='dropout applied to layers (0 = no dropout)')
# Training Arguments
parser.add_argument('--epochs', type=int, default=15,
help='maximum number of epochs')
parser.add_argument('--min_epochs', type=int, default=12,
help="minimum number of epochs to train for")
parser.add_argument('--no_earlystopping', action='store_true',
help="won't use KenLM for early stopping")
parser.add_argument('--patience', type=int, default=2,
help="number of language model evaluations without ppl "
"improvement to wait before early stopping")
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='batch size')
parser.add_argument('--niters_ae', type=int, default=1,
help='number of autoencoder iterations in training')
parser.add_argument('--niters_gan_d', type=int, default=5,
help='number of discriminator iterations in training')
parser.add_argument('--niters_gan_g', type=int, default=1,
help='number of generator iterations in training')
parser.add_argument('--niters_gan_ae', type=int, default=1,
help='number of gan-into-ae iterations in training')
parser.add_argument('--niters_gan_schedule', type=str, default='',
help='epoch counts to increase number of GAN training '
' iterations (increment by 1 each time)')
parser.add_argument('--lr_ae', type=float, default=1,
help='autoencoder learning rate')
parser.add_argument('--lr_gan_g', type=float, default=1e-04,
help='generator learning rate')
parser.add_argument('--lr_gan_d', type=float, default=1e-04,
help='critic/discriminator learning rate')
parser.add_argument('--beta1', type=float, default=0.5,
help='beta1 for adam. default=0.5')
parser.add_argument('--clip', type=float, default=1,
help='gradient clipping, max norm')
parser.add_argument('--gan_clamp', type=float, default=0.01,
help='WGAN clamp')
parser.add_argument('--gan_gp_lambda', type=float, default=10,
help='WGAN GP penalty lambda')
parser.add_argument('--grad_lambda', type=float, default=1,
help='WGAN into AE lambda')
# Evaluation Arguments
parser.add_argument('--sample', action='store_true',
help='sample when decoding for generation')
parser.add_argument('--N', type=int, default=5,
help='N-gram order for training n-gram language model')
parser.add_argument('--log_interval', type=int, default=200,
help='interval to log autoencoder training results')
# Other
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
args = parser.parse_args()
print(vars(args))
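# train.py is executed in this module's namespace, presumably so it can pick up
# the parsed `args` defined above.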
exec(open("train.py").read())
| {
"content_hash": "76a01e31f238f52c073bf6e51e8970e8",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 78,
"avg_line_length": 51.3921568627451,
"alnum_prop": 0.6163677985501717,
"repo_name": "jakezhaojb/ARAE",
"id": "d986ef3e5f9e98d312ebaf9da2c08589a0d0d26c",
"size": "5242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lang/run_oneb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Lua",
"bytes": "16781"
},
{
"name": "Python",
"bytes": "132819"
}
],
"symlink_target": ""
} |
from bottle import Bottle, run, static_file, request
import os, bottle, gphoto, shutil, time, camerapi_config
app = Bottle()
@app.route('/listconfig')
def list_config():
return gphoto.get_list(['--list-config'])
@app.route('/getconfig')
def get_config():
querystring = request.query.configkey.strip()
return gphoto.execute(['--get-config', querystring])
@app.route('/static/<filepath:path>')
def server_static(filepath):
return static_file(filepath, root=camerapi_config.config['bottle_staticfilepath'], mimetype="text/html")
@app.route('/listsummary')
def summary():
return gphoto.get_list(['--summary'])
@app.route('/captureimage')
def capture_image():
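    # Drive autofocus, capture a frame, and move the downloaded capt0000.jpg
    # into the static folder under a timestamped name; returns that filename,
    # or "false" if the capture did not produce the expected file.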
timestring = 'gphoto_' + time.strftime('%Y%m%d_%H%M%S') + '.jpg'
capture = gphoto.execute(['--set-config', '/main/actions/autofocusdrive=1', '--capture-image-and-download'])
if "capt0000.jpg" in capture:
# move the image to the static folder
shutil.copy('capt0000.jpg', 'static/' + timestring)
os.remove('capt0000.jpg')
return timestring
return "false"
@app.route('/capturepreview')
def capture_preview():
    capture = gphoto.execute(['--set-config', '/main/actions/autofocusdrive=1', '--capture-preview'])
    return capture
@app.route('/listabilities')
def list_abilities():
return gphoto.get_list(['--abilities'])
@app.route('/storageinfo')
def storage_info():
return gphoto.execute(['--storage-info'])
@app.route('/resetconnection')
def reset_camera_connection():
is_usb_reset = gphoto.resetusb()
if is_usb_reset:
return "Camera connection " + gphoto.global_usb_port + " has been reset"
return "Error resetting camera connection. Check connection."
bottle.debug(camerapi_config.config['bottle_debug'])
run(app, host=camerapi_config.config['bottle_ip'], port=camerapi_config.config['bottle_port'], reloader=camerapi_config.config['bottle_reloader'])
| {
"content_hash": "e93ca4f6f46e679a3c425154406b21aa",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 146,
"avg_line_length": 30.8,
"alnum_prop": 0.7094155844155844,
"repo_name": "RezaMahmood/CameraPi",
"id": "17b580285b7e9a8762bac749cd1c7979fd38e9c3",
"size": "1848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "camerapi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3445"
},
{
"name": "Python",
"bytes": "3381"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, get_object_or_404
from .models import Question, Reply, Comment
from django.contrib.auth.decorators import login_required
from django.http.response import HttpResponse
from django.db.models import Q
from django.core.cache import cache
# Create your views here.
from zhihuuser.models import ZhihuUser
from questions.models import UpDownVote,Topic, Notification,\
UserNotificationCounter
from .models import createNotifications, deleteNotifications, mark_as_read
from questions.forms import addQuestionForm, addReplyForm
from django.shortcuts import redirect
import json
def getQuestionArgs(request,question_id):
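    # Build the template context for a question page: the question, its replies
    # (optionally ordered by creation date), the viewer's follow state and vote
    # flag per reply, and the unread notification count.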
q = get_object_or_404(Question,pk=question_id)
replies = Reply.objects.filter(question__id=question_id)
args = dict()
args['q'] = q
if 'sort' in request.GET:
newSort = request.GET['sort']
if newSort == 'created':
args['replies'] = replies.order_by('create_date')
else:
args['replies'] = replies
zhihuuser = request.user.zhihuuser
args['zhihuuser'] = request.user.zhihuuser
if zhihuuser.questions.filter(pk=question_id).exists():
args['isfollow'] = True
else:
args['isfollow'] = False
for reply in replies:
if reply.votepeoples.filter(pk=zhihuuser.id).exists():
updownvote = UpDownVote.objects.get(voteman=zhihuuser,reply=reply)
reply.updownflag = updownvote.opinions
else:
reply.updownflag = 'N'
reply.save()
if Reply.objects.filter(question=q, author=zhihuuser).exists():
args['hasReplied'] = True
else:
args['hasReplied'] = False
# messages = Notification.objects.filter(notify_to_user__id=zhihuuser.id)
# args['messages'] = messages
args['message_count'] = UserNotificationCounter.objects.get(pk=zhihuuser.id).unread_count
return args
@login_required
def questionShow(request,question_id):
args = getQuestionArgs(request, question_id)
args['addReply'] = addReplyForm()
return render(request, 'questions/question.html', args)
@login_required
def upVoteAnswer(request):
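    # AJAX endpoint: toggle the requesting user's upvote on a reply (flipping an
    # existing downvote), keep the vote counters in sync, and fan the
    # corresponding notifications out to the reply author and the voter's
    # followers.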
print '>>>>>>>>upVoteAnswer'
reply_id = None
if request.method == 'GET':
reply_id = request.GET['reply_id']
if reply_id:
reply = Reply.objects.get(pk=reply_id)
user = request.user.zhihuuser
if reply:
if reply.votepeoples.filter(pk=user.id).exists():
updownvote = UpDownVote.objects.get(reply=reply, voteman=user)
if updownvote.opinions == 'D':
updownvote.opinions = 'U'
updownvote.save()
#upvote notification to reply author
createNotifications(from_user=user, to_user=reply.author, notify_type='U', question=reply.question, reply=reply)
#upvote notificaiton from VIP
createNotifications(from_user=user, to_user=user.followers.all(), notify_type='UF', question=reply.question, reply=reply)
else:
updownvote.delete()
#upvote notification to reply author
deleteNotifications(from_user=user, to_user=reply.author, notify_type='U', question=reply.question, reply=reply)
#upvote notificaiton from VIP
deleteNotifications(from_user=user, to_user=user.followers.all(), notify_type='UF', question=reply.question, reply=reply)
else:
newVote = UpDownVote(reply=reply, voteman=user, opinions='U')
newVote.save()
#upvote notification to reply author
createNotifications(from_user=user, to_user=reply.author, notify_type='U', question=reply.question, reply=reply)
#upvote notificaiton from VIP
createNotifications(from_user=user, to_user=user.followers.all(), notify_type='UF', question=reply.question, reply=reply)
#the method below can avoid some confilicts about different user's vote
reply.up_vote = UpDownVote.objects.filter(reply=reply, opinions='U').count()
reply.down_vote = UpDownVote.objects.filter(reply=reply, opinions='D').count()
reply.save()
return HttpResponse(reply.up_vote)
@login_required
def downVoteAnswer(request):
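    # AJAX endpoint: toggle the requesting user's downvote on a reply (flipping
    # an existing upvote and retracting its notifications), then recount the
    # votes.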
print '>>>>>>>>downVoteAnswer'
reply_id = None
if request.method == 'GET':
reply_id = request.GET['reply_id']
if reply_id:
reply = Reply.objects.get(pk=reply_id)
user = request.user.zhihuuser
if reply:
if reply.votepeoples.filter(pk=user.id).exists():
updownvote = UpDownVote.objects.get(reply=reply, voteman=user)
if updownvote.opinions == 'U':
updownvote.opinions = 'D'
updownvote.save()
#upvote notification to reply author
deleteNotifications(from_user=user, to_user=reply.author, notify_type='U', question=reply.question, reply=reply)
#upvote notificaiton from VIP
deleteNotifications(from_user=user, to_user=user.followers.all(), notify_type='UF', question=reply.question, reply=reply)
else:
updownvote.delete()
else:
newVote = UpDownVote(reply=reply, voteman=user, opinions='D')
newVote.save()
reply.up_vote = UpDownVote.objects.filter(reply=reply, opinions='U').count()
reply.down_vote = UpDownVote.objects.filter(reply=reply, opinions='D').count()
reply.save()
return HttpResponse(reply.up_vote)
@login_required
def followQuestion(request):
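    # AJAX endpoint: follow or unfollow a question for the current user, adjust
    # its follower count, and notify the user's followers about the interest.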
print '>>>>>>>>followQuestion'
question_id = None
if request.method == 'GET':
question_id = request.GET['question_id']
if question_id:
q = Question.objects.get(pk=question_id)
user = request.user.zhihuuser
if user.questions.filter(pk=question_id).exists():
user.questions.remove(q)
q.followers_count = q.followers_count - 1
print '>>>>>>>>>del: ', q.followers_count
#interest Notification from VIP
deleteNotifications(from_user=user, to_user=user.followers.all(), notify_type='IF', question=q)
else:
user.questions.add(q)
q.followers_count = q.followers_count + 1
print '>>>>>>>>>add: ', q.followers_count
#interest question Notification from VIP
createNotifications(from_user=user, to_user=user.followers.all(), notify_type='IF', question=q)
q.save()
return HttpResponse(q.followers_count)
@login_required
def addQuestion(request):
if request.method == "POST":
form = addQuestionForm(request.POST)
if form.is_valid():
title = form.cleaned_data.get('title')
details = form.cleaned_data.get('details')
topics = form.cleaned_data.get('topics')
user = request.user.zhihuuser
question = Question.objects.create(author=user, title=title, details=details)
for item in topics.split(' '):
topic = Topic.objects.get(name=item)
question.topics.add(topic)
#create question Notification from VIP
createNotifications(from_user=user, to_user=user.followers.all(), notify_type='CF', question=question)
return redirect(question)
def topicSuggestion(request,max=6):
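    # Return up to `max` topics whose names contain the given prefix as JSON,
    # likely consumed by a topic-autocomplete widget.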
start = request.GET['start']
topics = []
if start:
topics = Topic.objects.filter(name__contains=start)
if max > 0 and topics.count() > max:
topics = topics[:max]
data = []
for topic in topics:
item = dict()
item['name'] = topic.name
item['topic_id'] = topic.id
data.append(item)
args = dict()
args['data'] = data
return HttpResponse(json.dumps(args))
@login_required
def addReply(request,question_id):
if request.method == "POST":
form = addReplyForm(request.POST)
if form.is_valid():
details = form.cleaned_data.get('details')
user = request.user.zhihuuser
question = Question.objects.get(pk=question_id)
reply = Reply.objects.create(author=user, details=details, question=question)
#create reply Notification from question
createNotifications(from_user=user, to_user=question.followers.all(), notify_type='RQ', question=question, reply=reply)
#create reply Notification from VIP
createNotifications(from_user=user, to_user=user.followers.all(), notify_type='RF', question=question, reply=reply)
return redirect(question)
else:
args = getQuestionArgs(request, question_id)
args['addReply'] = form
return render(request, 'questions/question.html', args)
def topicShow(request, topic_id):
pass
@login_required
def markAllMessage(request):
user = request.user.zhihuuser
# mark_as_read(user)
data = UserNotificationCounter.objects.get(pk=user.id).unread_count
return HttpResponse(data)
def clean_thanksmessages(user,raw_messages):
u_messages = {}
t_messages = {}
for item in raw_messages:
question_id = item.notify_question.id
question_url = item.notify_question.get_absolute_url()
question_title = item.notify_question.title
user_name = item.notify_from_user.fullname
user_url = item.notify_from_user.get_absolute_url()
notify_type = item.notify_type
has_read = item.has_read
data = (question_url, user_name, user_url, notify_type, has_read,question_title)
if notify_type == 'U':
if u_messages.has_key(question_id):
u_messages[question_id].append(data)
else:
u_messages[question_id] = [data,]
elif notify_type == 'T':
if t_messages.has_key(question_id):
t_messages[question_id].append(data)
else:
t_messages[question_id] = [data,]
def messages_merge(a,b):
for item in b:
l = len(b[item])
message = {'question_id': item,
'question_url': b[item][0][0],
'notify_type': b[item][0][3],
'has_read': b[item][0][4],
'title' : b[item][0][5],
'users' : []}
for i in range(0,l):
message['users'].append( {'user_name':b[item][i][1],
'user_url':b[item][i][2]} )
if b[item][i][4] == False:
message['has_read'] = False
a.append(message)
messages = []
messages_merge(messages, u_messages)
messages_merge(messages, t_messages)
return messages
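# A minimal sketch of the grouping performed above, using plain tuples in place
# of Notification objects (the sample values are made up for illustration):
# each question collapses to one entry whose 'users' list gathers every voter.
def _thanks_merge_example():
    sample = {
        7: [('/q/7/', 'Alice', '/people/alice/', 'U', True, 'Why is the sky blue?'),
            ('/q/7/', 'Bob', '/people/bob/', 'U', False, 'Why is the sky blue?')],
    }
    merged = []
    for question_id, rows in sample.items():
        merged.append({'question_id': question_id,
                       'question_url': rows[0][0],
                       'notify_type': rows[0][3],
                       'title': rows[0][5],
                       'has_read': all(row[4] for row in rows),
                       'users': [{'user_name': r[1], 'user_url': r[2]} for r in rows]})
    return merged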
def clean_commonMessages(user,raw_messages):
messages = []
r_messages = {}
for item in raw_messages:
question_id = item.notify_question.id
question_title = item.notify_question.title
question_url = item.notify_question.get_absolute_url()
user_name = item.notify_from_user.fullname
user_url = item.notify_from_user.get_absolute_url()
notify_type = item.notify_type
has_read = item.has_read
if notify_type == 'CF' or notify_type == 'IF':
messages.append({'question_id':question_id,'question_url':question_url,
'user_name':user_name,'user_url':user_url,
'notify_type':notify_type,'has_read':has_read, 'question_title':question_title})
elif notify_type == 'RF' or notify_type == 'RQ':
data = {'question_id':question_id,'question_url':question_url,
'user_name':user_name,'user_url':user_url,
'notify_type':'R','has_read':has_read, 'question_title':question_title}
if r_messages.has_key(question_id):
r_messages[question_id].append( data )
else:
r_messages[question_id] = [data,]
def user_merge(users):
merged = {}
for user in users:
merged[user['user_name']] = user['user_url']
merged_users = []
for user in merged:
merged_users.append({'user_name':user,'user_url':merged[user]})
return merged_users
for question_id in r_messages:
data = {'question_id':question_id,'question_url':r_messages[question_id][0]['question_url'],
'users':[],
'notify_type':r_messages[question_id][0]['notify_type'],'has_read':r_messages[question_id][0]['has_read'],
'question_title':r_messages[question_id][0]['question_title']}
for item in r_messages[question_id]:
data['users'].append({'user_name':item['user_name'],'user_url':item['user_url']})
data['users'] = user_merge(data['users'])
messages.append(data)
return messages
MESSAGE_TIMEOUT= 10
@login_required
def getMessageList(request):
messageType = None
if request.method == 'GET':
messageType = request.GET['messageType']
args = dict()
zhihuuser = request.user.zhihuuser
notifies = Notification.objects.filter(~Q(notify_from_user__id=zhihuuser.id))\
.filter(notify_to_user__id=zhihuuser.id)
if messageType == 'thanks':
if cache.get('thanksmessage') == None:
print 'thanks messages are generating...'
messages = notifies.filter( Q(notify_type='U') | Q(notify_type='T') )
args['messages'] = clean_thanksmessages(zhihuuser, messages)
# args['messages'] = messages
response = render(request,'thanksmessage.html',args)
cache.set('thanksmessage', response, MESSAGE_TIMEOUT)
mark_as_read(zhihuuser,'thanks')
else:
print 'get thanks messages from cache'
return cache.get('thanksmessage')
elif messageType == 'user':
if cache.get('usermessage') == None:
print 'user messages are generating...'
messages = notifies.filter( Q(notify_type='F') )
args['messages'] = messages
response = render(request,'usermessage.html',args)
cache.set('usermessage', response, MESSAGE_TIMEOUT)
mark_as_read(zhihuuser,'user')
else:
print 'get user messages from cache'
return cache.get('usermessage')
elif messageType == 'common':
if cache.get('commonmessage') == None:
print 'common messages are generating...'
messages = notifies.filter( Q(notify_type='RF') | Q(notify_type='RQ') \
| Q(notify_type='CF') | Q(notify_type='IF') )
args['messages'] = clean_commonMessages(zhihuuser, messages)
# args['messages'] = messages
response = render(request,'commonmessage.html',args)
cache.set('commonmessage', response, MESSAGE_TIMEOUT)
mark_as_read(zhihuuser,'common')
else:
print 'get common messages from cache'
return cache.get('commonmessage')
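# Note that the cache keys used above ('thanksmessage', 'usermessage',
# 'commonmessage') are shared by every logged-in user, so one user's rendered
# notification page can be served to another for up to MESSAGE_TIMEOUT seconds.
# A per-user key, sketched below (the name and format are illustrative, not
# part of the original code), would avoid that:
def _user_cache_key(zhihuuser, message_type):
    return 'messages:%s:%s' % (zhihuuser.id, message_type)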
| {
"content_hash": "78e4389f386e3bfa8fb0c2ff1f8f3105",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 178,
"avg_line_length": 43.705555555555556,
"alnum_prop": 0.5858650057200966,
"repo_name": "threegirl2014/zhihuCopy",
"id": "81e5ce75aa7117a0d1e8a1840123b51c19de0541",
"size": "15734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "questions/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "58545"
},
{
"name": "HTML",
"bytes": "59522"
},
{
"name": "JavaScript",
"bytes": "275093"
},
{
"name": "Python",
"bytes": "59248"
}
],
"symlink_target": ""
} |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
from tvm.relay import testing
import numpy as np
from tvm.relay import Expr
from tvm.relay.analysis import free_vars
import pytest
DEBUG_PRINT = False
SEMVER = '#[version = "0.0.5"]\n'
def astext(program, unify_free_vars=False):
text = program.astext()
if isinstance(program, Expr):
roundtrip_program = tvm.parser.parse_expr(text)
else:
roundtrip_program = tvm.parser.fromtext(text)
tvm.ir.assert_structural_equal(roundtrip_program, program, map_free_vars=True)
return text
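# Illustrative use of the helper above (it mirrors the constant check in
# test_meta_data below, so it relies only on APIs exercised by these tests):
# astext returns the printed text and verifies the parser reads it back into a
# structurally-equal expression.
def _astext_roundtrip_example():
    return astext(relay.const([1, 2, 3]))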
def show(text):
if DEBUG_PRINT:
print("---------------------------")
print(text)
def test_large_graph():
x = relay.var("x", shape=(3, 2))
y = relay.var("y")
one = relay.const(10e10, dtype="float32")
z = relay.add(x, one)
for i in range(int(1e6)):
z = relay.add(z, one)
f = relay.Function([x, y], z)
show(astext(f))
def test_func():
x = relay.var("x", shape=(3, 2))
y = relay.var("y")
one = relay.const(10e10, dtype="float32")
z = relay.add(x, one)
z = relay.add(z, z)
f = relay.Function([x, y], z)
show(astext(z))
show(astext(f))
def test_mod():
x = relay.var("x", "float32")
y = relay.var("y", "float32")
z = relay.add(x, y)
z = relay.add(z, z)
f = relay.Function([x, y], z)
mod = tvm.IRModule()
mod["myf"] = f
mod = relay.transform.InferType()(mod)
text = astext(mod)
assert "def @myf" in text
assert "def @myf" in str(mod)
assert "add(%0, %0) /* ty=float32 */" in text
assert "add(%0, %0) /* ty=float32 */" in str(mod)
show(mod.astext(annotate=lambda x: str(x.checked_type.dtype) if type(x) == relay.Call else ""))
show(text)
def test_meta_data():
n, c, h, w = te.size_var("n"), 10, 224, 224
x = relay.var("x", shape=(n, c, h, w))
w = relay.var("w")
z = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=2)
f = relay.Function([x, w], z)
text = astext(f, unify_free_vars=True)
text_no_meta = str(f)
assert "channels=2" in text
assert "channels=2" in text_no_meta
assert "meta[tir.SizeVar][0]" in text
assert "meta[tir.SizeVar][0]" in text_no_meta
assert "type_key" in text
assert "type_key" not in text_no_meta
text = astext(relay.const([1, 2, 3]))
assert "meta[relay.Constant][0]" in text
def test_call_attrs():
x = relay.var("x")
# non default args
z = relay.nn.softmax(x, axis=2)
assert "axis=2" in astext(z)
# default args
z = relay.nn.softmax(x)
assert "softmax(%x)" in astext(z)
# non default args
z = relay.expand_dims(x, axis=2, num_newaxis=2)
assert "num_newaxis=2" in astext(z)
def test_let_if_scope():
x = relay.var("x", "float32")
y = relay.var("y", "float32")
cond = relay.var("cond", "bool")
sb = relay.ScopeBuilder()
with sb.if_scope(cond):
v1 = sb.let("v", relay.const(1, "float32"))
v2 = sb.let("v", x)
sb.ret(relay.subtract(v1, v2))
with sb.else_scope():
v3 = relay.var("v")
let2 = relay.Let(v3, y, v3)
sb.ret(relay.add(let2, let2))
result = sb.get()
f = relay.Function([x, y, cond], result)
text = astext(f)
assert text.count("{") == 3
assert "%cond: bool" in text
show(astext(f))
def test_variable_name():
# avoid pure number even if the namehint is pure number
v1 = relay.var("1")
assert "%v1" in astext(v1)
def test_mlp():
net, _ = tvm.relay.testing.mlp.get_workload(batch_size=1)
astext(net)
def test_resnet():
net, _ = tvm.relay.testing.resnet.get_workload(batch_size=1)
astext(net)
def test_mobilenet():
net, _ = tvm.relay.testing.mobilenet.get_workload(batch_size=1)
astext(net)
def test_dqn():
net, _ = tvm.relay.testing.dqn.get_workload(batch_size=1)
astext(net)
def test_dcgan():
net, _ = tvm.relay.testing.dcgan.get_workload(batch_size=1)
astext(net)
def test_lstm():
net, _ = tvm.relay.testing.lstm.get_workload(1, 1)
astext(net)
net, _ = tvm.relay.testing.lstm.get_workload(4, 4)
astext(net)
def test_inception_v3():
net, _ = tvm.relay.testing.inception_v3.get_workload(batch_size=1)
astext(net)
def test_squeezenet():
for version in ["1.0", "1.1"]:
net, _ = tvm.relay.testing.squeezenet.get_workload(batch_size=1, version=version)
astext(net)
def test_densenet():
net, _ = tvm.relay.testing.densenet.get_workload(batch_size=1)
astext(net)
def test_call_node_order():
x = relay.var("x")
y = relay.var("y")
prog = relay.Call(
relay.Function([x], x), [relay.Call(relay.Function([y], y), [relay.const(1)])]
)
assert astext(prog) == SEMVER + (
"%0 = fn (%y) {\n"
" %y\n"
"};\n"
"%1 = %0(1);\n"
"%2 = fn (%x) {\n"
" %x\n"
"};\n"
"%2(%1)"
)
def test_let_inlining():
tup = relay.Tuple([relay.const(0), relay.const(0)])
x = relay.var("x")
assert astext(relay.Let(x, tup, tup)) == SEMVER + ("%0 = (0, 0);\n" "let %x = %0;\n" "%0")
assert astext(relay.Let(x, tup, x)) == SEMVER + ("let %x = (0, 0);\n" "%x")
def test_zeros():
x = relay.op.zeros([], "float32")
astext(x)
def test_unapplied_constructor():
type_def_str = r"""
type List[A] {
Cons(A, List[A]),
Nil,
}
"""
main_def_str = r"""
def @main[A]() -> fn (A, List[A]) -> List[A] {
Cons
}
"""
mod = tvm.parser.parse(SEMVER + type_def_str + main_def_str)
mod_str = str(mod)
# ensure constructors are printed correctly in type definitions (with their
# signature) and as exprs (without their signature)
assert type_def_str.strip() in mod_str
assert main_def_str.strip() in mod_str
def test_null_attribute():
x = relay.var("x")
y = relay.var("y")
z = relay.Function([x], y)
z = z.with_attr("TestAttribute", None)
txt = astext(z)
assert "TestAttribute=None" in txt
def test_span():
x = relay.var("x", shape=(3, 2))
y = relay.var("y")
one = relay.const(10e10, dtype="float32")
z = relay.add(x, one)
z = relay.Call(
z.op, z.args, z.attrs, z.type_args, relay.Span(relay.SourceName("Add0"), 0, 0, 0, 0)
)
z = relay.add(z, z)
z = relay.Call(
z.op, z.args, z.attrs, z.type_args, relay.Span(relay.SourceName("Add1"), 0, 0, 0, 0)
)
f = relay.Function([x, y], z)
txt = astext(f)
assert "Add0" in txt
assert "Add1" in txt
def test_optional_info():
c = relay.const(1)
call = relay.add(c, c)
m = tvm.IRModule.from_expr(call)
m = relay.transform.InferType()(m)
txt = astext(m)
assert txt.count("/* ty=int32 */") == 3
def test_slash_in_identifier():
x = relay.var("base/x")
y = relay.var("base/y")
z = x + y
txt = astext(z)
assert "base/x" in txt
assert "base/y" in txt
if __name__ == "__main__":
pytest.main([__file__])
| {
"content_hash": "1702f9923c29ef7178e35c0f30b6d5e7",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 99,
"avg_line_length": 26.154882154882156,
"alnum_prop": 0.5935890834191555,
"repo_name": "Laurawly/tvm-1",
"id": "21c460fa037169a3591e245227be92e364034d61",
"size": "7768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/relay/test_ir_text_printer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4093"
},
{
"name": "C",
"bytes": "351611"
},
{
"name": "C++",
"bytes": "11660999"
},
{
"name": "CMake",
"bytes": "228510"
},
{
"name": "Cuda",
"bytes": "16902"
},
{
"name": "Cython",
"bytes": "28979"
},
{
"name": "Go",
"bytes": "111527"
},
{
"name": "HTML",
"bytes": "2664"
},
{
"name": "Java",
"bytes": "199950"
},
{
"name": "JavaScript",
"bytes": "15305"
},
{
"name": "Makefile",
"bytes": "67149"
},
{
"name": "Objective-C",
"bytes": "24259"
},
{
"name": "Objective-C++",
"bytes": "87655"
},
{
"name": "Python",
"bytes": "16256580"
},
{
"name": "RenderScript",
"bytes": "1895"
},
{
"name": "Rust",
"bytes": "391076"
},
{
"name": "Shell",
"bytes": "228674"
},
{
"name": "TypeScript",
"bytes": "94385"
}
],
"symlink_target": ""
} |
import copy
import functools
from tools.api_proto_plugin import plugin
from tools.api_proto_plugin import visitor
from tools.protoxform import migrate
from tools.protoxform import utils
# Note: we have to import these proto definitions so that they are not lost
# during FileDescriptorProto printing.
from google.api import annotations_pb2 as _
from validate import validate_pb2 as _
from envoy.annotations import deprecation_pb2 as _
from envoy.annotations import resource_pb2
from udpa.annotations import migrate_pb2
from udpa.annotations import sensitive_pb2 as _
from udpa.annotations import status_pb2
class ProtoXformError(Exception):
"""Base error class for the protoxform module."""
class ProtoFormatVisitor(visitor.Visitor):
"""Visitor to generate a proto representation from a FileDescriptor proto.
See visitor.Visitor for visitor method docs comments.
"""
def __init__(self, active_or_frozen, params):
if params['type_db_path']:
utils.LoadTypeDb(params['type_db_path'])
self._freeze = 'extra_args' in params and params['extra_args'] == 'freeze'
self._active_or_frozen = active_or_frozen
def VisitService(self, service_proto, type_context):
return None
def VisitEnum(self, enum_proto, type_context):
return None
def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums):
return None
def VisitFile(self, file_proto, type_context, services, msgs, enums):
# Freeze protos that have next major version candidates.
typedb = utils.GetTypeDb()
output_proto = copy.deepcopy(file_proto)
existing_pkg_version_status = output_proto.options.Extensions[
status_pb2.file_status].package_version_status
empty_file = len(services) == 0 and len(enums) == 0 and len(msgs) == 0
pkg_version_status_exempt = file_proto.name.startswith('envoy/annotations') or empty_file
# It's a format error not to set package_version_status.
if existing_pkg_version_status == status_pb2.UNKNOWN and not pkg_version_status_exempt:
raise ProtoXformError('package_version_status must be set in %s' % file_proto.name)
# Only update package_version_status for .active_or_frozen.proto;
# migrate.VersionUpgradeXform has already taken care of the next major
# version candidates.
if self._active_or_frozen and not pkg_version_status_exempt:
# Freeze if this is an active package with a next major version. Preserve
# frozen status otherwise.
if self._freeze and typedb.next_version_protos.get(output_proto.name, None):
target_pkg_version_status = status_pb2.FROZEN
elif existing_pkg_version_status == status_pb2.FROZEN:
target_pkg_version_status = status_pb2.FROZEN
else:
assert (existing_pkg_version_status == status_pb2.ACTIVE)
target_pkg_version_status = status_pb2.ACTIVE
output_proto.options.Extensions[
status_pb2.file_status].package_version_status = target_pkg_version_status
return str(output_proto)
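# The package_version_status decision above, restated as a small standalone
# sketch (this helper is illustrative only and is not called by the plugin):
# freeze requested + a next-major-version candidate exists -> FROZEN;
# already FROZEN -> stays FROZEN; otherwise the package must be ACTIVE.
def _target_package_version_status(freeze, has_next_version, existing_status):
  if freeze and has_next_version:
    return status_pb2.FROZEN
  if existing_status == status_pb2.FROZEN:
    return status_pb2.FROZEN
  assert existing_status == status_pb2.ACTIVE
  return status_pb2.ACTIVE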
def Main():
plugin.Plugin([
plugin.DirectOutputDescriptor('.active_or_frozen.proto',
functools.partial(ProtoFormatVisitor, True),
want_params=True),
plugin.OutputDescriptor('.next_major_version_candidate.proto',
functools.partial(ProtoFormatVisitor, False),
functools.partial(migrate.VersionUpgradeXform, 2, False),
want_params=True),
plugin.OutputDescriptor('.next_major_version_candidate.envoy_internal.proto',
functools.partial(ProtoFormatVisitor, False),
functools.partial(migrate.VersionUpgradeXform, 2, True),
want_params=True)
])
if __name__ == '__main__':
Main()
| {
"content_hash": "f048beb270153c6932fc1ce4f2169f2d",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 93,
"avg_line_length": 42.20879120879121,
"alnum_prop": 0.6894038010934652,
"repo_name": "istio/envoy",
"id": "9331877aa17fcc8e86f0b636f14e3be4228b1a93",
"size": "4155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/protoxform/protoxform.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "35685"
},
{
"name": "C++",
"bytes": "19486055"
},
{
"name": "Dockerfile",
"bytes": "245"
},
{
"name": "Emacs Lisp",
"bytes": "966"
},
{
"name": "Go",
"bytes": "695"
},
{
"name": "JavaScript",
"bytes": "1760"
},
{
"name": "Makefile",
"bytes": "1985"
},
{
"name": "PowerShell",
"bytes": "6173"
},
{
"name": "PureBasic",
"bytes": "472"
},
{
"name": "Python",
"bytes": "418501"
},
{
"name": "Rust",
"bytes": "3471"
},
{
"name": "Shell",
"bytes": "120251"
},
{
"name": "Starlark",
"bytes": "1184414"
},
{
"name": "Thrift",
"bytes": "748"
}
],
"symlink_target": ""
} |
"""
Flex Messaging compatibility tests.
@since: 0.3.2
"""
import unittest
import datetime
import uuid
import pyamf
from pyamf.flex import messaging
class AbstractMessageTestCase(unittest.TestCase):
def test_repr(self):
a = messaging.AbstractMessage()
a.body = u'é,è'
try:
repr(a)
except:
raise
class EncodingTestCase(unittest.TestCase):
"""
Encoding tests for L{messaging}
"""
def test_AcknowledgeMessage(self):
m = messaging.AcknowledgeMessage()
m.correlationId = '1234'
self.assertEquals(pyamf.encode(m).getvalue(),
'\n\x81\x03Uflex.messaging.messages.AcknowledgeMessage\tbody'
'\x11clientId\x1bcorrelationId\x17destination\x0fheaders\x13'
'messageId\x15timeToLive\x13timestamp\x01\x01\x06\t1234\x01\n\x0b'
'\x01\x01\x01\x01\x01')
def test_CommandMessage(self):
m = messaging.CommandMessage(operation='foo.bar')
self.assertEquals(pyamf.encode(m).getvalue(),
'\n\x81\x13Mflex.messaging.messages.CommandMessage\tbody\x11'
'clientId\x1bcorrelationId\x17destination\x0fheaders\x13messageId'
'\x13operation\x15timeToLive\x13timestamp\x01\x01\x01\x01\n\x0b'
'\x01\x01\x01\x06\x0ffoo.bar\x01\x01')
def test_ErrorMessage(self):
m = messaging.ErrorMessage(faultString='ValueError')
self.assertEquals(pyamf.encode(m).getvalue(),
'\n\x81SIflex.messaging.messages.ErrorMessage\tbody\x11'
'clientId\x1bcorrelationId\x17destination\x19extendedData\x13'
'faultCode\x17faultDetail\x17faultString\x0fheaders\x13messageId'
'\x13rootCause\x15timeToLive\x13timestamp\x01\x01\x01\x01\n\x0b'
'\x01\x01\x01\x01\x06\x15ValueError\n\x05\x01\x01\n\x05\x01\x01'
'\x01')
def test_RemotingMessage(self):
m = messaging.RemotingMessage(source='foo.bar')
self.assertEquals(pyamf.encode(m).getvalue(),
'\n\x81\x13Oflex.messaging.messages.RemotingMessage'
'\tbody\x11clientId\x17destination\x0fheaders\x13messageId\x13'
'operation\rsource\x15timeToLive\x13timestamp\x01\x01\x01\n\x0b'
'\x01\x01\x01\x01\x06\x0ffoo.bar\x01\x01')
class SmallMessageTestCase(unittest.TestCase):
"""
Tests for L{messaging.SmallMessageMixIn}
"""
def setUp(self):
self.decoder = pyamf.get_decoder(pyamf.AMF3)
self.buffer = self.decoder.stream
def test_acknowledge(self):
bytes = ('\n\x07\x07DSK\xa8\x03\n\x0b\x01%DSMessagingVersion\x05?\xf0'
'\x00\x00\x00\x00\x00\x00\tDSId\x06IEE0D161D-C11D-25CB-8DBE-3B77B'
'54B55D9\x01\x05Br3&m\x85\x10\x00\x0c!\xee\r\x16\x1d\xc1(&[\xc9'
'\x80RK\x9bE\xc6\xc4\x0c!\xee\r\x16\x1d\xc1=\x8e\xa3\xe0\x10\xef'
'\xad;\xe5\xc5j\x02\x0c!S\x84\x83\xdb\xa9\xc8\xcaM`\x952f\xdbQ'
'\xc9<\x00')
self.buffer.write(bytes)
self.buffer.seek(0)
msg = self.decoder.readElement()
self.assertTrue(isinstance(msg, messaging.AcknowledgeMessageExt))
self.assertEquals(msg.body, None)
self.assertEquals(msg.destination, None)
self.assertEquals(msg.timeToLive, None)
self.assertEquals(msg.timestamp, datetime.datetime(2009, 8, 19, 11, 24, 43, 985000))
self.assertEquals(msg.headers, {
'DSMessagingVersion': 1.0,
'DSId': u'EE0D161D-C11D-25CB-8DBE-3B77B54B55D9'
})
self.assertEquals(msg.clientId, uuid.UUID('ee0d161d-c128-265b-c980-524b9b45c6c4'))
self.assertEquals(msg.messageId, uuid.UUID('ee0d161d-c13d-8ea3-e010-efad3be5c56a'))
self.assertEquals(msg.correlationId, uuid.UUID('538483db-a9c8-ca4d-6095-3266db51c93c'))
self.assertEquals(self.buffer.remaining(), 0)
# now encode the msg to check that encoding is byte for byte the same
buffer = pyamf.encode(msg, encoding=pyamf.AMF3).getvalue()
self.assertEquals(buffer, bytes)
def test_command(self):
bytes = ('\n\x07\x07DSC\x88\x02\n\x0b\x01\tDSId\x06IEE0D161D-C11D-25CB-8DBE-3B77B54B55D9\x01\x0c!\xc0\xdf\xb7|\xd6\xee$1s\x152f\xe11\xa8f\x01\x06\x01\x01\x04\x02')
self.buffer.write(bytes)
self.buffer.seek(0)
msg = self.decoder.readElement()
self.assertTrue(isinstance(msg, messaging.CommandMessageExt))
self.assertEquals(msg.body, None)
self.assertEquals(msg.destination, None)
self.assertEquals(msg.timeToLive, None)
self.assertEquals(msg.timestamp, None)
self.assertEquals(msg.headers, {
'DSId': u'EE0D161D-C11D-25CB-8DBE-3B77B54B55D9'
})
self.assertEquals(msg.clientId, None)
self.assertEquals(msg.messageId, uuid.UUID('c0dfb77c-d6ee-2431-7315-3266e131a866'))
self.assertEquals(msg.correlationId, u'')
self.assertEquals(self.buffer.remaining(), 0)
# now encode the msg to check that encoding is byte for byte the same
buffer = pyamf.encode(msg, encoding=pyamf.AMF3).getvalue()
self.assertEquals(buffer, bytes)
def test_async(self):
pass
def test_getmessage(self):
"""
Tests for `getSmallMessage`
"""
for cls in ['AbstractMessage', 'ErrorMessage', 'RemotingMessage']:
cls = getattr(messaging, cls)
self.assertRaises(NotImplementedError, cls().getSmallMessage)
kwargs = {
'body': {'foo': 'bar'},
'clientId': 'spam',
'destination': 'eggs',
'headers': {'blarg': 'whoop'},
'messageId': 'baz',
'timestamp': 1234,
'timeToLive': 99
}
# test async
a = messaging.AsyncMessage(correlationId='yay', **kwargs)
m = a.getSmallMessage()
k = kwargs.copy()
k.update({'correlationId': 'yay'})
self.assertTrue(isinstance(m, messaging.AsyncMessageExt))
self.assertEquals(m.__dict__, k)
# test command
a = messaging.CommandMessage(operation='yay', **kwargs)
m = a.getSmallMessage()
k = kwargs.copy()
k.update({'operation': 'yay', 'correlationId': None, 'messageRefType': None})
self.assertTrue(isinstance(m, messaging.CommandMessageExt))
self.assertEquals(m.__dict__, k)
# test ack
a = messaging.AcknowledgeMessage(**kwargs)
m = a.getSmallMessage()
k = kwargs.copy()
k.update({'correlationId': None})
self.assertTrue(isinstance(m, messaging.AcknowledgeMessageExt))
self.assertEquals(m.__dict__, k)
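# A minimal usage sketch (not itself a test; the correlationId and body values
# are arbitrary): convert an AsyncMessage to its small-message form before AMF3
# encoding, exactly as the getSmallMessage tests above exercise it.
def _small_message_encoding_example():
    msg = messaging.AsyncMessage(correlationId='yay', body={'foo': 'bar'})
    small = msg.getSmallMessage()  # a messaging.AsyncMessageExt instance
    return pyamf.encode(small, encoding=pyamf.AMF3).getvalue()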
def suite():
suite = unittest.TestSuite()
test_cases = [
AbstractMessageTestCase,
EncodingTestCase,
SmallMessageTestCase
]
for tc in test_cases:
suite.addTest(unittest.makeSuite(tc))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| {
"content_hash": "ecc6b88dc33299e3fcb48a7cde6dfba2",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 171,
"avg_line_length": 33.71634615384615,
"alnum_prop": 0.6331099386852987,
"repo_name": "cardmagic/PyAMF",
"id": "4bb52989c79af80edc0ad1da422ea7128c71ad64",
"size": "7118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyamf/tests/test_flex_messaging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "87097"
},
{
"name": "C",
"bytes": "635399"
},
{
"name": "Java",
"bytes": "374"
},
{
"name": "Python",
"bytes": "955083"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import pandas as pd
from glob import glob
import numpy as np
from itertools import combinations
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import RandomizedSearchCV
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
from sklearn.feature_selection import RFECV
from scipy.stats import randint as sp_randint
import h5py as hdf
# get the files
files = glob('../redshifts/*_redshifts.csv')
# load the frames into an array
frames = [pd.read_csv(f) for f in files]
# concat all of the frames together
result = pd.concat(frames, ignore_index=True)
# find the results that aren't nan
mask = ~np.isnan(result.Q)
result = result[mask]
# build the features
features = pd.DataFrame()
# mags first
features = result[['u', 'g', 'r', 'i', 'z']]
# colors
colors = combinations('ugriz', 2)
for i in colors:
features['%s-%s' %
(i[0], i[1])] = result.loc[:, i[0]] - result.loc[:, i[1]]
# colors squared
colors = combinations('ugriz', 2)
for i in colors:
features['(%s-%s)2' %
(i[0], i[1])] = (result.loc[:, i[0]] - result.loc[:, i[1]])**2
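# The block above yields 25 features per object: the 5 'ugriz' magnitudes, the
# C(5,2) = 10 pairwise colours, and the same 10 colours squared. A small
# illustrative helper (not used elsewhere in this script) listing their names:
def _color_feature_names(bands='ugriz'):
    names = list(bands)
    names += ['%s-%s' % pair for pair in combinations(bands, 2)]
    names += ['(%s-%s)2' % pair for pair in combinations(bands, 2)]
    return names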
# make datasets
X = features.values
y = result.Q.values
# scaler = preprocessing.StandardScaler().fit(X)
# X = scaler.transform(X)
X = preprocessing.scale(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)
# # use a full grid over all parameters -- for the random search
param_grid = {"max_depth": [3, None],
#"max_features": sp_randint(1, 25),
'max_features': ['auto'],
"min_samples_split": sp_randint(2, 25),
"min_samples_leaf": sp_randint(2, 10),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"],
"n_estimators": sp_randint(5, 100)}
best_score = 0.0
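# sp_randint(2, 25) in the grid above is a frozen scipy.stats distribution;
# RandomizedSearchCV draws integer samples from it via .rvs(). A tiny
# illustration (sampled values are random, the commented output is made up):
def _sample_min_samples_split(n=3):
    return sp_randint(2, 25).rvs(n)  # e.g. array([ 7, 19,  3])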
for i in range(2):
# print("# Tuning hyper-parameters for %s" % score)
# print()
# clf = GridSearchCV(RandomForestClassifier(),
# param_grid=param_grid,
# cv=5, scoring='%s_weighted' % score, n_jobs=-1)
# perform a randomized search over the parameter grid for the best hyper-parameters
n_iter_search = 50
clf = RandomizedSearchCV(RandomForestClassifier(),
param_distributions=param_grid,
n_iter=n_iter_search,
cv=3,
scoring='accuracy',
n_jobs=-1)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print(clf.best_params_)
print()
#print("Detailed classification report:")
#print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
print(confusion_matrix(y_true, y_pred))
print(best_score, clf.best_score_)
if i == 1:
break
else:
best_score = clf.best_score_
# remove some features
rfecv = RFECV(estimator=clf.best_estimator_,
step=1,
cv=2,
scoring='accuracy')
rfecv.fit(X_train, y_train)
print("Optimal number of features : %d" % rfecv.n_features_)
X_train = rfecv.transform(X_train)
X_test = rfecv.transform(X_test)
for j in range(5):
break
print(j)
magDict = {}
with hdf.File('./truth/truth' + str(j).zfill(2) + '_Oii.hdf5', 'r') as f:
dset = f['truth%s_Oii' % (str(j).zfill(2))]
magDict['u'] = dset['OMAG'][:, 0] # u band
magDict['g'] = dset['OMAG'][:, 1] # g band
magDict['r'] = dset['OMAG'][:, 2] # r band
magDict['i'] = dset['OMAG'][:, 3] # i band
magDict['z'] = dset['OMAG'][:, 4] # z band
# we only want the g mag < 22 galaxies
mask = np.where(magDict['g'] < 22)[0]
print(mask.size)
# create a data array for everything to fit into
#features = -np.ones((mask.size, 25))
data = -np.ones((mask.size, 25))
# mags
for i, m in enumerate('ugriz'):
data[:, i] = magDict[m][mask]
# colors
colors = combinations('ugriz', 2)
for i, c in enumerate(colors):
data[:, i + 5] = magDict[c[0]][mask] - magDict[c[1]][mask]
# colors squared
data[:, i + 15] = (magDict[c[0]][mask] - magDict[c[1]][mask])**2
# data = scaler.transform(data)
data = preprocessing.scale(data)
data = rfecv.transform(data)
# now we make the predictions based on the new features we've created
Qs = clf.predict(data)
print(np.where(Qs == 0)[0].size / float(Qs.size))
print(np.where(Qs == 1)[0].size / float(Qs.size))
print(np.where(Qs == 2)[0].size / float(Qs.size))
# with hdf.File('./truth/truth'+str(j).zfill(2)+'_Oii.hdf5', 'a') as f:
# values = -np.ones(magDict['u'].size)
# values[mask] = Qs
# f['Q'] = values
| {
"content_hash": "712cc4ae550780586e901155b930194d",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 77,
"avg_line_length": 33.22875816993464,
"alnum_prop": 0.5871361132966169,
"repo_name": "boada/vpCluster",
"id": "95cdf2264ea58f75c2e4ae070a4ab1fc6b3bcd51",
"size": "5084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/boada/analysis_all/MLmethods/mkObservations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "1096"
},
{
"name": "C",
"bytes": "11445"
},
{
"name": "IDL",
"bytes": "47873"
},
{
"name": "PostScript",
"bytes": "60669635"
},
{
"name": "Python",
"bytes": "359734"
},
{
"name": "TeX",
"bytes": "35070"
}
],
"symlink_target": ""
} |
from nitrile import * | {
"content_hash": "b91c49134603e75953a5feaa1e1d737c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 21,
"avg_line_length": 21,
"alnum_prop": 0.8095238095238095,
"repo_name": "barrettedwards/nitrile",
"id": "91853fb7883347e9a35b77fb11bcf59a56c11a59",
"size": "21",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nitrile/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "104172"
}
],
"symlink_target": ""
} |
"""Lookup table operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_lookup_ops
from tensorflow.python.ops import lookup_ops
# pylint: disable=unused-import
from tensorflow.python.ops.lookup_ops import FastHashSpec
from tensorflow.python.ops.lookup_ops import HasherSpec
from tensorflow.python.ops.lookup_ops import HashTable
from tensorflow.python.ops.lookup_ops import IdTableWithHashBuckets
from tensorflow.python.ops.lookup_ops import index_table_from_file
from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file
from tensorflow.python.ops.lookup_ops import InitializableLookupTableBase
from tensorflow.python.ops.lookup_ops import KeyValueTensorInitializer
from tensorflow.python.ops.lookup_ops import LookupInterface
from tensorflow.python.ops.lookup_ops import StrongHashSpec
from tensorflow.python.ops.lookup_ops import TableInitializerBase
from tensorflow.python.ops.lookup_ops import TextFileIdTableInitializer
from tensorflow.python.ops.lookup_ops import TextFileIndex
from tensorflow.python.ops.lookup_ops import TextFileInitializer
from tensorflow.python.ops.lookup_ops import TextFileStringTableInitializer
# pylint: enable=unused-import
from tensorflow.python.training.saver import BaseSaverBuilder
from tensorflow.python.util.deprecation import deprecated
@deprecated("2017-04-10", "Use `index_table_from_file`.")
def string_to_index_table_from_file(vocabulary_file=None,
num_oov_buckets=0,
vocab_size=None,
default_value=-1,
hasher_spec=FastHashSpec,
name=None):
return index_table_from_file(
vocabulary_file, num_oov_buckets, vocab_size, default_value, hasher_spec,
key_dtype=dtypes.string, name=name)
@deprecated("2017-04-10", "Use `index_table_from_tensor`.")
def string_to_index_table_from_tensor(mapping,
num_oov_buckets=0,
default_value=-1,
hasher_spec=FastHashSpec,
name=None):
with ops.name_scope(name, "string_to_index") as scope:
mapping = ops.convert_to_tensor(mapping)
if dtypes.string != mapping.dtype.base_dtype:
raise ValueError("string_to_index_table_from_tensor requires string.")
return index_table_from_tensor(
mapping, num_oov_buckets, default_value, hasher_spec, name=scope)
def index_table_from_tensor(mapping,
num_oov_buckets=0,
default_value=-1,
hasher_spec=FastHashSpec,
dtype=dtypes.string,
name=None):
"""Returns a lookup table that converts a string tensor into int64 IDs.
This operation constructs a lookup table to convert tensor of strings into
int64 IDs. The mapping can be initialized from a string `mapping` 1-D tensor
where each element is a key and corresponding index within the tensor is the
value.
Any lookup of an out-of-vocabulary token will return a bucket ID based on its
hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
`default_value`.
The bucket ID range is `[mapping size, mapping size + num_oov_buckets - 1]`.
The underlying table must be initialized by calling
`tf.tables_initializer.run()` or `table.init.run()` once.
Elements in `mapping` cannot have duplicates, otherwise when executing the
table initializer op, it will throw a `FailedPreconditionError`.
Sample Usages:
```python
mapping_strings = tf.constant(["emerson", "lake", "palmer"])
table = tf.contrib.lookup.index_table_from_tensor(
mapping=mapping_strings, num_oov_buckets=1, default_value=-1)
features = tf.constant(["emerson", "lake", "and", "palmer"])
ids = table.lookup(features)
...
tf.tables_initializer().run()
ids.eval() ==> [0, 1, 3, 2]
```
Args:
mapping: A 1-D `Tensor` that specifies the mapping of keys to indices. The
type of this object must be castable to `dtype`.
num_oov_buckets: The number of out-of-vocabulary buckets.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
hasher_spec: A `HasherSpec` to specify the hash function to use for
assignment of out-of-vocabulary buckets.
dtype: The type of values passed to `lookup`. Only string and integers are
supported.
name: A name for this op (optional).
Returns:
The lookup table to map an input `Tensor` to index `int64` `Tensor`.
Raises:
ValueError: If `mapping` is invalid.
ValueError: If `num_oov_buckets` is negative.
"""
if mapping is None:
raise ValueError("mapping must be specified.")
return lookup_ops.index_table_from_tensor(
vocabulary_list=mapping,
num_oov_buckets=num_oov_buckets,
default_value=default_value,
hasher_spec=hasher_spec,
dtype=dtype,
name=name)
@deprecated(
"2017-01-07", "This op will be removed after the deprecation date. "
"Please switch to index_table_from_tensor and call the lookup "
"method of the returned table.")
def string_to_index(tensor, mapping, default_value=-1, name=None):
"""Maps `tensor` of strings into `int64` indices based on `mapping`.
This operation converts `tensor` of strings into `int64` indices.
The mapping is initialized from a string `mapping` tensor where each element
is a key and corresponding index within the tensor is the value.
Any entry in the input which does not have a corresponding entry in 'mapping'
(an out-of-vocabulary entry) is assigned the `default_value`
Elements in `mapping` cannot be duplicated, otherwise the initialization
will throw a FailedPreconditionError.
The underlying table must be initialized by calling
`tf.tables_initializer.run()` once.
For example:
```python
mapping_strings = tf.constant(["emerson", "lake", "palmer"])
feats = tf.constant(["emerson", "lake", "and", "palmer"])
ids = tf.contrib.lookup.string_to_index(
feats, mapping=mapping_strings, default_value=-1)
...
tf.tables_initializer().run()
ids.eval() ==> [0, 1, -1, 2]
```
Args:
tensor: A 1-D input `Tensor` with the strings to map to indices.
mapping: A 1-D string `Tensor` that specifies the mapping of strings to
indices.
default_value: The `int64` value to use for out-of-vocabulary strings.
Defaults to -1.
name: A name for this op (optional).
Returns:
The mapped indices. It has the same shape and tensor type (dense or sparse)
as `tensor`.
"""
table = index_table_from_tensor(
mapping=mapping, default_value=default_value, name=name)
return table.lookup(tensor)
def index_to_string_table_from_tensor(mapping, default_value="UNK", name=None):
"""Returns a lookup table that maps a `Tensor` of indices into strings.
This operation constructs a lookup table to map int64 indices into string
values. The mapping is initialized from a string `mapping` 1-D `Tensor` where
each element is a value and the corresponding index within the tensor is the
key.
Any input which does not have a corresponding index in 'mapping'
(an out-of-vocabulary entry) is assigned the `default_value`
The underlying table must be initialized by calling
`tf.tables_initializer.run()` or `table.init.run()` once.
Elements in `mapping` cannot have duplicates, otherwise when executing the
table initializer op, it will throw a `FailedPreconditionError`.
Sample Usages:
```python
mapping_string = tf.constant(["emerson", "lake", "palmer"])
indices = tf.constant([1, 5], tf.int64)
table = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping_string, default_value="UNKNOWN")
values = table.lookup(indices)
...
tf.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
mapping: A 1-D string `Tensor` that specifies the strings to map from
indices.
default_value: The value to use for out-of-vocabulary indices.
name: A name for this op (optional).
Returns:
The lookup table to map a string values associated to a given index `int64`
`Tensors`.
Raises:
ValueError: when `mapping` is not set.
"""
if mapping is None:
raise ValueError("mapping must be specified.")
return lookup_ops.index_to_string_table_from_tensor(
vocabulary_list=mapping, default_value=default_value, name=name)
@deprecated(
"2017-01-07", "This op will be removed after the deprecation date. "
"Please switch to index_to_string_table_from_tensor and call the lookup "
"method of the returned table.")
def index_to_string(tensor, mapping, default_value="UNK", name=None):
"""Maps `tensor` of indices into string values based on `mapping`.
This operation converts `int64` indices into string values. The mapping is
initialized from a string `mapping` tensor where each element is a value and
the corresponding index within the tensor is the key.
Any input which does not have a corresponding index in 'mapping'
(an out-of-vocabulary entry) is assigned the `default_value`
The underlying table must be initialized by calling
`tf.tables_initializer.run()` once.
For example:
```python
mapping_string = tf.constant(["emerson", "lake", "palmer"])
indices = tf.constant([1, 5], tf.int64)
values = tf.contrib.lookup.index_to_string(
indices, mapping=mapping_string, default_value="UNKNOWN")
...
tf.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
tensor: A `int64` `Tensor` with the indices to map to strings.
mapping: A 1-D string `Tensor` that specifies the strings to map from
indices.
default_value: The string value to use for out-of-vocabulary indices.
name: A name for this op (optional).
Returns:
The strings values associated to the indices. The resultant dense
feature value tensor has the same shape as the corresponding `indices`.
"""
table = index_to_string_table_from_tensor(
mapping=mapping, default_value=default_value, name=name)
return table.lookup(tensor)
class MutableHashTable(LookupInterface):
"""A generic mutable hash table implementation.
Data can be inserted by calling the insert method. It does not support
initialization via the init method.
Example usage:
```python
table = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string,
value_dtype=tf.int64,
default_value=-1)
table.insert(keys, values)
out = table.lookup(query_keys)
print(out.eval())
```
"""
def __init__(self,
key_dtype,
value_dtype,
default_value,
shared_name=None,
name="MutableHashTable",
checkpoint=True):
"""Creates an empty `MutableHashTable` object.
Creates a table, the type of its keys and values are specified by key_dtype
and value_dtype, respectively.
Args:
key_dtype: the type of the key tensors.
value_dtype: the type of the value tensors.
default_value: The value to use if a key is missing in the table.
shared_name: If non-empty, this table will be shared under
the given name across multiple sessions.
name: A name for the operation (optional).
checkpoint: if True, the contents of the table are saved to and restored
from checkpoints. If `shared_name` is empty for a checkpointed table, it
is shared using the table node name.
Returns:
A `MutableHashTable` object.
Raises:
ValueError: If checkpoint is True and no name was specified.
"""
self._default_value = ops.convert_to_tensor(default_value,
dtype=value_dtype)
self._value_shape = self._default_value.get_shape()
# The table must be shared if checkpointing is requested for multi-worker
# training to work correctly. Use the node name if no shared_name has been
# explicitly specified.
use_node_name_sharing = checkpoint and shared_name is None
if self._default_value.get_shape().ndims == 0:
self._table_ref = gen_lookup_ops.mutable_hash_table_v2(
shared_name=shared_name,
use_node_name_sharing=use_node_name_sharing,
key_dtype=key_dtype,
value_dtype=value_dtype,
name=name)
else:
self._table_ref = gen_lookup_ops.mutable_hash_table_of_tensors_v2(
shared_name=shared_name,
use_node_name_sharing=use_node_name_sharing,
key_dtype=key_dtype,
value_dtype=value_dtype,
value_shape=self._default_value.get_shape(),
name=name)
super(MutableHashTable, self).__init__(key_dtype, value_dtype,
self._table_ref.op.name.split(
"/")[-1])
if checkpoint:
saveable = MutableHashTable._Saveable(self, name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
def size(self, name=None):
"""Compute the number of elements in this table.
Args:
name: A name for the operation (optional).
Returns:
A scalar tensor containing the number of elements in this table.
"""
with ops.name_scope(name, "%s_Size" % self._name,
[self._table_ref]) as name:
with ops.colocate_with(self._table_ref):
return gen_lookup_ops.lookup_table_size_v2(self._table_ref, name=name)
def lookup(self, keys, name=None):
"""Looks up `keys` in a table, outputs the corresponding values.
The `default_value` is used for keys not present in the table.
Args:
keys: Keys to look up. Can be a tensor of any shape. Must match the
table's key_dtype.
name: A name for the operation (optional).
Returns:
A tensor containing the values in the same shape as `keys` using the
table's value type.
Raises:
TypeError: when `keys` do not match the table data types.
"""
if keys.dtype.base_dtype != self._key_dtype:
raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
(self._key_dtype, keys.dtype))
with ops.name_scope(name, "%s_lookup_table_find" % self._name,
(self._table_ref, keys, self._default_value)) as name:
with ops.colocate_with(self._table_ref):
values = gen_lookup_ops.lookup_table_find_v2(
self._table_ref, keys, self._default_value, name=name)
values.set_shape(keys.get_shape().concatenate(self._value_shape))
return values
def insert(self, keys, values, name=None):
"""Associates `keys` with `values`.
Args:
keys: Keys to insert. Can be a tensor of any shape. Must match the
table's key type.
values: Values to be associated with keys. Must be a tensor of the same
shape as `keys` and match the table's value type.
name: A name for the operation (optional).
Returns:
The created Operation.
Raises:
TypeError: when `keys` or `values` doesn't match the table data
types.
"""
# pylint: disable=protected-access
lookup_ops._check_table_dtypes(self, keys.dtype, values.dtype)
# pylint: enable=protected-access
with ops.name_scope(name, "%s_lookup_table_insert" % self._name,
[self._table_ref, keys, values]) as name:
with ops.colocate_with(self._table_ref):
# pylint: disable=protected-access
op = gen_lookup_ops.lookup_table_insert_v2(
self._table_ref, keys, values, name=name)
return op
def export(self, name=None):
"""Returns tensors of all keys and values in the table.
Args:
name: A name for the operation (optional).
Returns:
A pair of tensors with the first tensor containing all keys and the
second tensors containing all values in the table.
"""
with ops.name_scope(name, "%s_lookup_table_export_values" % self._name,
[self._table_ref]) as name:
with ops.colocate_with(self._table_ref):
exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(
self._table_ref, self._key_dtype, self._value_dtype, name=name)
exported_values.set_shape(exported_keys.get_shape().concatenate(
self._value_shape))
return exported_keys, exported_values
class _Saveable(BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for MutableHashTable."""
def __init__(self, table, name):
tensors = table.export()
specs = [
BaseSaverBuilder.SaveSpec(tensors[0], "", name + "-keys"),
BaseSaverBuilder.SaveSpec(tensors[1], "", name + "-values")
]
# pylint: disable=protected-access
super(MutableHashTable._Saveable, self).__init__(table, specs, name)
def restore(self, restored_tensors, unused_restored_shapes):
# pylint: disable=protected-access
with ops.colocate_with(self.op._table_ref):
return gen_lookup_ops.lookup_table_import_v2(
self.op._table_ref, restored_tensors[0], restored_tensors[1])
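# A hypothetical usage sketch (not part of this module; the names below are
# illustrative): because __init__ registers a _Saveable in the
# GraphKeys.SAVEABLE_OBJECTS collection when checkpoint=True, a standard Saver
# built afterwards should include the table's keys and values in checkpoints.
def _mutable_hash_table_checkpoint_sketch():
  from tensorflow.python.training import saver as saver_lib
  table = MutableHashTable(dtypes.string, dtypes.int64, default_value=-1)
  return table, saver_lib.Saver()  # the Saver picks the table up via the collection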
class MutableDenseHashTable(LookupInterface):
"""A generic mutable hash table implementation using tensors as backing store.
Data can be inserted by calling the insert method. It does not support
initialization via the init method.
It uses "open addressing" with quadratic reprobing to resolve collisions.
Compared to `MutableHashTable` the insert and lookup operations in a
`MutableDenseHashTable` are typically faster, but memory usage can be higher.
However, `MutableDenseHashTable` does not require additional memory for
temporary tensors created during checkpointing and restore operations.
Example usage:
```python
table = tf.contrib.lookup.MutableDenseHashTable(key_dtype=tf.int64,
value_dtype=tf.int64,
default_value=-1,
empty_key=0)
sess.run(table.insert(keys, values))
out = table.lookup(query_keys)
print(out.eval())
```
"""
# TODO(andreasst): consider extracting common code with MutableHashTable into
# a common superclass.
def __init__(self,
key_dtype,
value_dtype,
default_value,
empty_key,
initial_num_buckets=None,
shared_name=None,
name="MutableDenseHashTable",
checkpoint=True):
"""Creates an empty `MutableDenseHashTable` object.
Creates a table, the type of its keys and values are specified by key_dtype
and value_dtype, respectively.
Args:
key_dtype: the type of the key tensors.
value_dtype: the type of the value tensors.
default_value: The value to use if a key is missing in the table.
empty_key: the key to use to represent empty buckets internally. Must not
be used in insert or lookup operations.
initial_num_buckets: the initial number of buckets.
shared_name: If non-empty, this table will be shared under
the given name across multiple sessions.
name: A name for the operation (optional).
checkpoint: if True, the contents of the table are saved to and restored
from checkpoints. If `shared_name` is empty for a checkpointed table, it
is shared using the table node name.
Returns:
A `MutableDenseHashTable` object.
Raises:
ValueError: If checkpoint is True and no name was specified.
"""
self._default_value = ops.convert_to_tensor(
default_value, dtype=value_dtype)
self._value_shape = self._default_value.get_shape()
# The table must be shared if checkpointing is requested for multi-worker
# training to work correctly. Use the node name if no shared_name has been
# explicitly specified.
use_node_name_sharing = checkpoint and shared_name is None
empty_key = ops.convert_to_tensor(empty_key, dtype=key_dtype)
self._table_ref = gen_lookup_ops.mutable_dense_hash_table_v2(
empty_key=empty_key,
shared_name=shared_name,
use_node_name_sharing=use_node_name_sharing,
value_dtype=value_dtype,
value_shape=self._value_shape,
initial_num_buckets=initial_num_buckets,
name=name)
super(MutableDenseHashTable, self).__init__(
key_dtype, value_dtype, self._table_ref.op.name.split("/")[-1])
if checkpoint:
saveable = MutableDenseHashTable._Saveable(self, name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
def size(self, name=None):
"""Compute the number of elements in this table.
Args:
name: A name for the operation (optional).
Returns:
A scalar tensor containing the number of elements in this table.
"""
with ops.name_scope(name, "%s_Size" % self._name,
[self._table_ref]) as name:
with ops.colocate_with(self._table_ref):
return gen_lookup_ops.lookup_table_size_v2(self._table_ref, name=name)
def lookup(self, keys, name=None):
"""Looks up `keys` in a table, outputs the corresponding values.
The `default_value` is used for keys not present in the table.
Args:
keys: Keys to look up. Can be a tensor of any shape. Must match the
table's key_dtype.
name: A name for the operation (optional).
Returns:
A tensor containing the values in the same shape as `keys` using the
table's value type.
Raises:
TypeError: when `keys` do not match the table data types.
"""
if keys.dtype.base_dtype != self._key_dtype:
raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
(self._key_dtype, keys.dtype))
with ops.name_scope(name, "%s_lookup_table_find" % self._name,
[self._table_ref, keys]) as name:
with ops.colocate_with(self._table_ref):
values = gen_lookup_ops.lookup_table_find_v2(
self._table_ref, keys, self._default_value, name=name)
if keys.get_shape().ndims is not None and keys.get_shape().ndims > 0:
values.set_shape(
tensor_shape.TensorShape([keys.get_shape().dims[0]]).concatenate(
self._value_shape))
return values
def insert(self, keys, values, name=None):
"""Associates `keys` with `values`.
Args:
keys: Keys to insert. Can be a tensor of any shape. Must match the
table's key type.
values: Values to be associated with keys. Must be a tensor of the same
shape as `keys` and match the table's value type.
name: A name for the operation (optional).
Returns:
The created Operation.
Raises:
TypeError: when `keys` or `values` doesn't match the table data
types.
"""
# pylint: disable=protected-access
lookup_ops._check_table_dtypes(self, keys.dtype, values.dtype)
# pylint: enable=protected-access
with ops.name_scope(name, "%s_lookup_table_insert" % self._name,
[self._table_ref, keys, values]) as name:
with ops.colocate_with(self._table_ref):
op = gen_lookup_ops.lookup_table_insert_v2(
self._table_ref, keys, values, name=name)
return op
def export(self, name=None):
"""Returns tensors of all keys and values in the table.
Args:
name: A name for the operation (optional).
Returns:
A pair of tensors with the first tensor containing all keys and the
second tensors containing all values in the table.
"""
with ops.name_scope(name, "%s_lookup_table_export_values" % self._name,
[self._table_ref]) as name:
with ops.colocate_with(self._table_ref):
exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(
self._table_ref, self._key_dtype, self._value_dtype, name=name)
exported_values.set_shape(exported_keys.get_shape().concatenate(
self._value_shape))
return exported_keys, exported_values
class _Saveable(BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for MutableDenseHashTable."""
def __init__(self, table, name):
tensors = table.export()
specs = [
BaseSaverBuilder.SaveSpec(tensors[0], "", name + "-keys"),
BaseSaverBuilder.SaveSpec(tensors[1], "", name + "-values")
]
# pylint: disable=protected-access
super(MutableDenseHashTable._Saveable, self).__init__(table, specs, name)
def restore(self, restored_tensors, unused_restored_shapes):
# pylint: disable=protected-access
with ops.colocate_with(self.op._table_ref):
return gen_lookup_ops.lookup_table_import_v2(
self.op._table_ref, restored_tensors[0], restored_tensors[1])
| {
"content_hash": "af48e58b9956b91d7e5ab25c97dbfc96",
"timestamp": "",
"source": "github",
"line_count": 659,
"max_line_length": 80,
"avg_line_length": 38.43702579666161,
"alnum_prop": 0.6609159099881563,
"repo_name": "Xeralux/tensorflow",
"id": "a03e731be32c5964cb4aece8e8a67525883a4e7c",
"size": "26019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/lookup/lookup_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "340972"
},
{
"name": "C++",
"bytes": "39479562"
},
{
"name": "CMake",
"bytes": "194702"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "567239"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33675501"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425916"
}
],
"symlink_target": ""
} |
from ginga.qtw.QtHelp import QtGui, QtCore
from ginga.qtw import QtHelp
from ginga.qtw import FitsImageCanvasTypesQt as CanvasTypes
from ginga import GingaPlugin
draw_colors = ['white', 'black', 'red', 'yellow', 'blue', 'green', 'pink',
'cyan', 'magenta', 'turquoise', 'aquamarine', 'purple']
default_drawtype = 'point'
default_drawcolor = 'blue'
class Drawing(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
# superclass defines some variables for us, like logger
super(Drawing, self).__init__(fv, fitsimage)
self.layertag = 'drawing-canvas'
canvas = CanvasTypes.DrawingCanvas()
canvas.enable_draw(True)
canvas.set_drawtype('point', color='cyan')
canvas.set_callback('draw-event', self.draw_cb)
canvas.setSurface(self.fitsimage)
self.canvas = canvas
self.w = None
self.drawtypes = canvas.get_drawtypes()
self.drawcolors = draw_colors
def build_gui(self, container):
sw = QtGui.QScrollArea()
twidget = QtHelp.VBox()
sp = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding,
QtGui.QSizePolicy.Fixed)
twidget.setSizePolicy(sp)
vbox1 = twidget.layout()
vbox1.setContentsMargins(4, 4, 4, 4)
vbox1.setSpacing(2)
sw.setWidgetResizable(True)
sw.setWidget(twidget)
msgFont = QtGui.QFont("Sans", 14)
tw = QtGui.QLabel()
tw.setFont(msgFont)
tw.setWordWrap(True)
self.tw = tw
fr = QtHelp.Frame("Instructions")
fr.layout().addWidget(tw, stretch=1, alignment=QtCore.Qt.AlignTop)
vbox1.addWidget(fr, stretch=0, alignment=QtCore.Qt.AlignTop)
fr = QtHelp.Frame("Drawing")
captions = (('Draw type', 'combobox'), ('Draw color', 'combobox'),
('Clear canvas', 'button'))
w, b = QtHelp.build_info(captions)
self.w = b
combobox = b.draw_type
options = []
index = 0
for name in self.drawtypes:
options.append(name)
combobox.addItem(name)
index += 1
index = self.drawtypes.index(default_drawtype)
combobox.setCurrentIndex(index)
combobox.activated.connect(self.set_drawparams)
self.w.draw_color = b.draw_color
combobox = b.draw_color
options = []
index = 0
self.drawcolors = draw_colors
for name in self.drawcolors:
options.append(name)
combobox.addItem(name)
index += 1
index = self.drawcolors.index(default_drawcolor)
combobox.setCurrentIndex(index)
combobox.activated.connect(self.set_drawparams)
b.clear_canvas.clicked.connect(self.clear_canvas)
fr.layout().addWidget(w, stretch=1, alignment=QtCore.Qt.AlignLeft)
vbox1.addWidget(fr, stretch=0, alignment=QtCore.Qt.AlignTop)
btns = QtHelp.HBox()
layout = btns.layout()
layout.setSpacing(3)
btn = QtGui.QPushButton("Close")
btn.clicked.connect(self.close)
layout.addWidget(btn, stretch=0, alignment=QtCore.Qt.AlignLeft)
vbox1.addWidget(btns, stretch=0, alignment=QtCore.Qt.AlignLeft)
container.addWidget(sw, stretch=1)
def set_drawparams(self):
index = self.w.draw_type.currentIndex()
kind = self.drawtypes[index]
index = self.w.draw_color.currentIndex()
drawparams = { 'color': self.drawcolors[index],
}
self.canvas.set_drawtype(kind, **drawparams)
def clear_canvas(self):
self.canvas.deleteAllObjects()
def close(self):
chname = self.fv.get_channelName(self.fitsimage)
self.fv.stop_operation_channel(chname, str(self))
return True
def instructions(self):
self.tw.setText("""Draw a figure with the right mouse button.""")
def start(self):
self.instructions()
self.set_drawparams()
# insert layer if it is not already
try:
obj = self.fitsimage.getObjectByTag(self.layertag)
except KeyError:
# Add canvas layer
self.fitsimage.add(self.canvas, tag=self.layertag)
self.resume()
def pause(self):
self.canvas.ui_setActive(False)
def resume(self):
self.canvas.ui_setActive(True)
self.fv.showStatus("Draw a figure with the right mouse button")
def stop(self):
# remove the canvas from the image
## try:
## self.fitsimage.deleteObjectByTag(self.layertag)
## except:
## pass
self.canvas.ui_setActive(False)
self.fv.showStatus("")
def redo(self):
pass
def draw_cb(self, fitsimage, tag):
# TODO: record information about objects drawn?
pass
def __str__(self):
return 'drawing'
#END
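# --- Illustrative sketch (not part of the original plugin file) ---
# A minimal, standalone illustration of how the canvas configured in
# Drawing.__init__() above is driven: the plugin simply forwards the GUI
# selections to canvas.set_drawtype(). Only names already used in this file
# (CanvasTypes, default_drawcolor) appear here; the lambda callback stands in
# for draw_cb() and is an assumption for illustration only.
def _example_canvas_setup():
    canvas = CanvasTypes.DrawingCanvas()
    canvas.enable_draw(True)
    # Pick any advertised draw type; the plugin does the same thing with the
    # combobox selection and one of the draw_colors defined above.
    kind = canvas.get_drawtypes()[0]
    canvas.set_drawtype(kind, color=default_drawcolor)
    canvas.set_callback('draw-event', lambda fitsimage, tag: None)
    return canvas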
| {
"content_hash": "80f771b0ca7ea95405f4cff444b85a51",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 74,
"avg_line_length": 30.650306748466257,
"alnum_prop": 0.5960768614891914,
"repo_name": "astrofrog/ginga",
"id": "04980a148792fa45bb1379fde02508bbe442a2d4",
"size": "5252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ginga/qtw/plugins/Drawing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1548520"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from ..._operations._operations import (
build_create_ledger_entry_request,
build_create_or_update_user_request,
build_delete_user_request,
build_get_constitution_request,
build_get_current_ledger_entry_request,
build_get_enclave_quotes_request,
build_get_ledger_entry_request,
build_get_receipt_request,
build_get_transaction_status_request,
build_get_user_request,
build_list_collections_request,
build_list_consortium_members_request,
build_list_ledger_entries_request,
)
from .._vendor import MixinABC
if sys.version_info >= (3, 9):
from collections.abc import MutableMapping
else:
from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports
JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ConfidentialLedgerClientOperationsMixin(MixinABC):
@distributed_trace_async
async def get_constitution(self, **kwargs: Any) -> JSON:
"""Gets the constitution used for governance.
The constitution is a script that assesses and applies proposals from consortium members.
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"digest": "str", # SHA256 digest of the constitution script. Required.
"script": "str" # Contents of the constitution. Required.
}
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
request = build_get_constitution_request(
api_version=self._config.api_version,
headers=_headers,
params=_params,
)
path_format_arguments = {
"ledgerEndpoint": self._serialize.url(
"self._config.ledger_endpoint", self._config.ledger_endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, cast(JSON, deserialized), {})
return cast(JSON, deserialized)
@distributed_trace
def list_consortium_members(self, **kwargs: Any) -> AsyncIterable[JSON]:
"""Lists the consortium members.
Consortium members can manage the Confidential Ledger.
:return: An iterator like instance of JSON object
:rtype: ~azure.core.async_paging.AsyncItemPaged[JSON]
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"certificate": "str", # PEM-encoded certificate associated with the member.
Required.
"id": "str" # Identifier assigned to the member. Required.
}
"""
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_consortium_members_request(
api_version=self._config.api_version,
headers=_headers,
params=_params,
)
path_format_arguments = {
"ledgerEndpoint": self._serialize.url(
"self._config.ledger_endpoint", self._config.ledger_endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
path_format_arguments = {
"ledgerEndpoint": self._serialize.url(
"self._config.ledger_endpoint", self._config.ledger_endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
return request
async def extract_data(pipeline_response):
deserialized = pipeline_response.http_response.json()
list_of_elem = deserialized["members"]
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.get("nextLink", None), AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
@distributed_trace_async
async def get_enclave_quotes(self, **kwargs: Any) -> JSON:
"""Gets quotes for all nodes of the Confidential Ledger.
A quote is an SGX enclave measurement that can be used to verify the validity of a node and its
enclave.
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"currentNodeId": "str", # Id of the Confidential Ledger node responding to
the request. Required.
"enclaveQuotes": {
"str": {
"mrenclave": "str", # Optional. MRENCLAVE value of the code
running in the enclave.
"nodeId": "str", # ID assigned to this node. Required.
"quoteVersion": "str", # Version of the quote presented.
Required.
"raw": "str" # Raw SGX quote, parsable by tools like Open
Enclave's oeverify. Required.
}
}
}
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
request = build_get_enclave_quotes_request(
api_version=self._config.api_version,
headers=_headers,
params=_params,
)
path_format_arguments = {
"ledgerEndpoint": self._serialize.url(
"self._config.ledger_endpoint", self._config.ledger_endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, cast(JSON, deserialized), {})
return cast(JSON, deserialized)
@distributed_trace
def list_collections(self, **kwargs: Any) -> AsyncIterable[JSON]:
"""Retrieves a list of collection ids present in the Confidential Ledger.
Collection ids are user-created collections of ledger entries.
:return: An iterator like instance of JSON object
:rtype: ~azure.core.async_paging.AsyncItemPaged[JSON]
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"collectionId": "str" # Required.
}
"""
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_collections_request(
api_version=self._config.api_version,
headers=_headers,
params=_params,
)
path_format_arguments = {
"ledgerEndpoint": self._serialize.url(
"self._config.ledger_endpoint", self._config.ledger_endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
path_format_arguments = {
"ledgerEndpoint": self._serialize.url(
"self._config.ledger_endpoint", self._config.ledger_endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
return request
async def extract_data(pipeline_response):
deserialized = pipeline_response.http_response.json()
list_of_elem = deserialized["collections"]
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.get("nextLink", None), AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
@distributed_trace
def list_ledger_entries(
self,
*,
collection_id: Optional[str] = None,
from_transaction_id: Optional[str] = None,
to_transaction_id: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable[JSON]:
"""Gets ledger entries from a collection corresponding to a range.
A collection id may optionally be specified. Only entries in the specified (or default)
collection will be returned.
:keyword collection_id: The collection id. Default value is None.
:paramtype collection_id: str
:keyword from_transaction_id: Specify the first transaction ID in a range. Default value is
None.
:paramtype from_transaction_id: str
:keyword to_transaction_id: Specify the last transaction ID in a range. Default value is None.
:paramtype to_transaction_id: str
:return: An iterator like instance of JSON object
:rtype: ~azure.core.async_paging.AsyncItemPaged[JSON]
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"collectionId": "str", # Optional.
"contents": "str", # Contents of the ledger entry. Required.
"transactionId": "str" # Optional. A unique identifier for the state of the
ledger. If returned as part of a LedgerEntry, it indicates the state from which
the entry was read.
}
"""
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_ledger_entries_request(
collection_id=collection_id,
from_transaction_id=from_transaction_id,
to_transaction_id=to_transaction_id,
api_version=self._config.api_version,
headers=_headers,
params=_params,
)
path_format_arguments = {
"ledgerEndpoint": self._serialize.url(
"self._config.ledger_endpoint", self._config.ledger_endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
path_format_arguments = {
"ledgerEndpoint": self._serialize.url(
"self._config.ledger_endpoint", self._config.ledger_endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
return request
async def extract_data(pipeline_response):
deserialized = pipeline_response.http_response.json()
list_of_elem = deserialized["entries"]
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.get("nextLink", None), AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
@overload
async def create_ledger_entry(
self, entry: JSON, *, collection_id: Optional[str] = None, content_type: str = "application/json", **kwargs: Any
) -> JSON:
"""Writes a ledger entry.
A collection id may optionally be specified.
:param entry: Ledger entry. Required.
:type entry: JSON
:keyword collection_id: The collection id. Default value is None.
:paramtype collection_id: str
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
entry = {
"collectionId": "str", # Optional.
"contents": "str", # Contents of the ledger entry. Required.
"transactionId": "str" # Optional. A unique identifier for the state of the
ledger. If returned as part of a LedgerEntry, it indicates the state from which
the entry was read.
}
# response body for status code(s): 200
response == {
"collectionId": "str" # Required.
}
"""
@overload
async def create_ledger_entry(
self, entry: IO, *, collection_id: Optional[str] = None, content_type: str = "application/json", **kwargs: Any
) -> JSON:
"""Writes a ledger entry.
A collection id may optionally be specified.
:param entry: Ledger entry. Required.
:type entry: IO
:keyword collection_id: The collection id. Default value is None.
:paramtype collection_id: str
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"collectionId": "str" # Required.
}
"""
@distributed_trace_async
async def create_ledger_entry(
self, entry: Union[JSON, IO], *, collection_id: Optional[str] = None, **kwargs: Any
) -> JSON:
"""Writes a ledger entry.
A collection id may optionally be specified.
:param entry: Ledger entry. Is either a model type or a IO type. Required.
:type entry: JSON or IO
:keyword collection_id: The collection id. Default value is None.
:paramtype collection_id: str
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"collectionId": "str" # Required.
}
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(entry, (IO, bytes)):
_content = entry
else:
_json = entry
request = build_create_ledger_entry_request(
collection_id=collection_id,
content_type=content_type,
api_version=self._config.api_version,
json=_json,
content=_content,
headers=_headers,
params=_params,
)
path_format_arguments = {
"ledgerEndpoint": self._serialize.url(
"self._config.ledger_endpoint", self._config.ledger_endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
response_headers = {}
response_headers["x-ms-ccf-transaction-id"] = self._deserialize(
"str", response.headers.get("x-ms-ccf-transaction-id")
)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, cast(JSON, deserialized), response_headers)
return cast(JSON, deserialized)
@distributed_trace_async
async def get_ledger_entry(
self, transaction_id: str, *, collection_id: Optional[str] = None, **kwargs: Any
) -> JSON:
"""Gets the ledger entry at the specified transaction id. A collection id may optionally be
specified to indicate the collection from which to fetch the value.
To return older ledger entries, the relevant sections of the ledger must be read from disk and
validated. To prevent blocking within the enclave, the response will indicate whether the entry
is ready and part of the response, or if the loading is still ongoing.
:param transaction_id: Identifies a write transaction. Required.
:type transaction_id: str
:keyword collection_id: The collection id. Default value is None.
:paramtype collection_id: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"entry": {
"collectionId": "str", # Optional. The ledger entry found as a
result of the query. This is only available if the query is in Ready state.
"contents": "str", # Contents of the ledger entry. Required.
"transactionId": "str" # Optional. A unique identifier for the state
of the ledger. If returned as part of a LedgerEntry, it indicates the state
from which the entry was read.
},
"state": "str" # State of a ledger query. Required. Known values are:
"Loading" and "Ready".
}
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
request = build_get_ledger_entry_request(
transaction_id=transaction_id,
collection_id=collection_id,
api_version=self._config.api_version,
headers=_headers,
params=_params,
)
path_format_arguments = {
"ledgerEndpoint": self._serialize.url(
"self._config.ledger_endpoint", self._config.ledger_endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, cast(JSON, deserialized), {})
return cast(JSON, deserialized)
@distributed_trace_async
async def get_receipt(self, transaction_id: str, **kwargs: Any) -> JSON:
"""Gets a receipt certifying ledger contents at a particular transaction id.
Gets a receipt certifying ledger contents at a particular transaction id.
:param transaction_id: Identifies a write transaction. Required.
:type transaction_id: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"receipt": {
"cert": "str", # Optional.
"leaf": "str", # Optional.
"leafComponents": {
"claimsDigest": "str", # Optional.
"commitEvidence": "str", # Optional.
"writeSetDigest": "str" # Optional.
},
"nodeId": "str", # Required.
"proof": [
{
"left": "str", # Optional. Required.
"right": "str" # Optional. Required.
}
],
"root": "str", # Optional.
"serviceEndorsements": [
"str" # Optional.
],
"signature": "str" # Required.
},
"state": "str", # State of a ledger query. Required. Known values are:
"Loading" and "Ready".
"transactionId": "str" # A unique identifier for the state of the ledger. If
returned as part of a LedgerEntry, it indicates the state from which the entry
was read. Required.
}
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
request = build_get_receipt_request(
transaction_id=transaction_id,
api_version=self._config.api_version,
headers=_headers,
params=_params,
)
path_format_arguments = {
"ledgerEndpoint": self._serialize.url(
"self._config.ledger_endpoint", self._config.ledger_endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, cast(JSON, deserialized), {})
return cast(JSON, deserialized)
@distributed_trace_async
async def get_transaction_status(self, transaction_id: str, **kwargs: Any) -> JSON:
"""Gets the status of an entry identified by a transaction id.
Gets the status of an entry identified by a transaction id.
:param transaction_id: Identifies a write transaction. Required.
:type transaction_id: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"state": "str", # Represents the state of the transaction. Required. Known
values are: "Committed" and "Pending".
"transactionId": "str" # A unique identifier for the state of the ledger. If
returned as part of a LedgerEntry, it indicates the state from which the entry
was read. Required.
}
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
request = build_get_transaction_status_request(
transaction_id=transaction_id,
api_version=self._config.api_version,
headers=_headers,
params=_params,
)
path_format_arguments = {
"ledgerEndpoint": self._serialize.url(
"self._config.ledger_endpoint", self._config.ledger_endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, cast(JSON, deserialized), {})
return cast(JSON, deserialized)
@distributed_trace_async
async def get_current_ledger_entry(self, *, collection_id: Optional[str] = None, **kwargs: Any) -> JSON:
"""Gets the current value available in the ledger.
A collection id may optionally be specified.
:keyword collection_id: The collection id. Default value is None.
:paramtype collection_id: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"collectionId": "str", # Optional.
"contents": "str", # Contents of the ledger entry. Required.
"transactionId": "str" # Optional. A unique identifier for the state of the
ledger. If returned as part of a LedgerEntry, it indicates the state from which
the entry was read.
}
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
request = build_get_current_ledger_entry_request(
collection_id=collection_id,
api_version=self._config.api_version,
headers=_headers,
params=_params,
)
path_format_arguments = {
"ledgerEndpoint": self._serialize.url(
"self._config.ledger_endpoint", self._config.ledger_endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, cast(JSON, deserialized), {})
return cast(JSON, deserialized)
@distributed_trace_async
async def delete_user(self, user_id: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements
"""Deletes a user from the Confidential Ledger.
Deletes a user from the Confidential Ledger.
:param user_id: The user id, either an AAD object ID or certificate fingerprint. Required.
:type user_id: str
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_user_request(
user_id=user_id,
api_version=self._config.api_version,
headers=_headers,
params=_params,
)
path_format_arguments = {
"ledgerEndpoint": self._serialize.url(
"self._config.ledger_endpoint", self._config.ledger_endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
@distributed_trace_async
async def get_user(self, user_id: str, **kwargs: Any) -> JSON:
"""Gets a user.
Gets a user.
:param user_id: The user id, either an AAD object ID or certificate fingerprint. Required.
:type user_id: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"assignedRole": "str", # Represents an assignable role. Required. Known
values are: "Administrator", "Contributor", and "Reader".
"userId": "str" # Optional. Identifier for the user. This must either be an
AAD object id or a certificate fingerprint.
}
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
request = build_get_user_request(
user_id=user_id,
api_version=self._config.api_version,
headers=_headers,
params=_params,
)
path_format_arguments = {
"ledgerEndpoint": self._serialize.url(
"self._config.ledger_endpoint", self._config.ledger_endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, cast(JSON, deserialized), {})
return cast(JSON, deserialized)
@overload
async def create_or_update_user(
self, user_id: str, user_details: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any
) -> JSON:
"""Adds a user or updates a user's fields.
A JSON merge patch is applied for existing users.
:param user_id: The user id, either an AAD object ID or certificate fingerprint. Required.
:type user_id: str
:param user_details: Details about a Confidential Ledger user. Required.
:type user_details: JSON
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/merge-patch+json".
:paramtype content_type: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
user_details = {
"assignedRole": "str", # Represents an assignable role. Required. Known
values are: "Administrator", "Contributor", and "Reader".
"userId": "str" # Optional. Identifier for the user. This must either be an
AAD object id or a certificate fingerprint.
}
# response body for status code(s): 200
response == {
"assignedRole": "str", # Represents an assignable role. Required. Known
values are: "Administrator", "Contributor", and "Reader".
"userId": "str" # Optional. Identifier for the user. This must either be an
AAD object id or a certificate fingerprint.
}
"""
@overload
async def create_or_update_user(
self, user_id: str, user_details: IO, *, content_type: str = "application/merge-patch+json", **kwargs: Any
) -> JSON:
"""Adds a user or updates a user's fields.
A JSON merge patch is applied for existing users.
:param user_id: The user id, either an AAD object ID or certificate fingerprint. Required.
:type user_id: str
:param user_details: Details about a Confidential Ledger user. Required.
:type user_details: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/merge-patch+json".
:paramtype content_type: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"assignedRole": "str", # Represents an assignable role. Required. Known
values are: "Administrator", "Contributor", and "Reader".
"userId": "str" # Optional. Identifier for the user. This must either be an
AAD object id or a certificate fingerprint.
}
"""
@distributed_trace_async
async def create_or_update_user(self, user_id: str, user_details: Union[JSON, IO], **kwargs: Any) -> JSON:
"""Adds a user or updates a user's fields.
A JSON merge patch is applied for existing users.
:param user_id: The user id, either an AAD object ID or certificate fingerprint. Required.
:type user_id: str
:param user_details: Details about a Confidential Ledger user. Is either a model type or a IO
type. Required.
:type user_details: JSON or IO
:keyword content_type: Body Parameter content-type. Known values are:
'application/merge-patch+json'. Default value is None.
:paramtype content_type: str
:return: JSON object
:rtype: JSON
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"assignedRole": "str", # Represents an assignable role. Required. Known
values are: "Administrator", "Contributor", and "Reader".
"userId": "str" # Optional. Identifier for the user. This must either be an
AAD object id or a certificate fingerprint.
}
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
content_type = content_type or "application/merge-patch+json"
_json = None
_content = None
if isinstance(user_details, (IO, bytes)):
_content = user_details
else:
_json = user_details
request = build_create_or_update_user_request(
user_id=user_id,
content_type=content_type,
api_version=self._config.api_version,
json=_json,
content=_content,
headers=_headers,
params=_params,
)
path_format_arguments = {
"ledgerEndpoint": self._serialize.url(
"self._config.ledger_endpoint", self._config.ledger_endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, cast(JSON, deserialized), {})
return cast(JSON, deserialized)
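# --- Illustrative usage sketch (not part of the generated operations file) ---
# A minimal sketch of how the async operations above are typically driven.
# It assumes a `client` object exposing this mixin has already been
# constructed elsewhere; client construction, credentials, and the transaction
# id below are placeholders, not values taken from this file.
import asyncio
async def _example_ledger_roundtrip(client, tx_id="2.15"):
    # Write an entry; collection_id is optional (see create_ledger_entry).
    write_result = await client.create_ledger_entry(
        {"contents": "hello world"}, collection_id="application-logs")
    print("wrote to collection:", write_result["collectionId"])
    # Poll a transaction id until it is committed, using the states
    # documented in get_transaction_status ("Pending" / "Committed").
    while (await client.get_transaction_status(tx_id))["state"] == "Pending":
        await asyncio.sleep(0.5)
    # get_ledger_entry may report "Loading" until the relevant ledger
    # sections have been read from disk and validated (see its docstring).
    result = await client.get_ledger_entry(tx_id, collection_id="application-logs")
    while result["state"] == "Loading":
        await asyncio.sleep(0.5)
        result = await client.get_ledger_entry(tx_id, collection_id="application-logs")
    print(result["entry"]["contents"])
    # Paged operations return an AsyncItemPaged consumed with `async for`.
    async for member in client.list_consortium_members():
        print(member["id"])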
| {
"content_hash": "bba10fec58bc883d42d29aa337651c6c",
"timestamp": "",
"source": "github",
"line_count": 1142,
"max_line_length": 120,
"avg_line_length": 40.69264448336252,
"alnum_prop": 0.5774784274063395,
"repo_name": "Azure/azure-sdk-for-python",
"id": "624f9722d98c763e0d6870578095d870c3bb8c42",
"size": "46971",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/confidentialledger/azure-confidentialledger/azure/confidentialledger/aio/_operations/_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import numpy as np
class Base(object):
@staticmethod
def data_n():
"""
Updates train_n and test_n numeric datasets (used for model data creation) based on numeric datatypes from train and test datasets.
"""
Base.train_n = Base.train.select_dtypes(include=[np.number])
Base.test_n = Base.test.select_dtypes(include=[np.number])
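# --- Illustrative sketch (not part of speedml) ---
# data_n() above simply projects Base.train / Base.test onto their numeric
# columns. The standalone pandas equivalent, on a throwaway DataFrame built
# here purely for illustration, looks like this:
def _example_numeric_projection():
    import pandas as pd
    frame = pd.DataFrame({'age': [22, 35], 'name': ['a', 'b'], 'fare': [7.25, 8.05]})
    # select_dtypes keeps only the numeric columns ('age' and 'fare' here),
    # which is exactly what data_n() does for the shared train/test frames.
    return frame.select_dtypes(include=[np.number])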
| {
"content_hash": "65f4699df7f33d228ed806da1c50a1f5",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 139,
"avg_line_length": 37.6,
"alnum_prop": 0.6569148936170213,
"repo_name": "Speedml/speedml",
"id": "360371791ebfd81031e3a44888eb182599e8b22d",
"size": "376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "speedml/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38365"
}
],
"symlink_target": ""
} |
from plugin import (utils,
constants,
connection,
)
from cloudify import ctx
from cloudify.decorators import operation
@operation
def create(**_):
"""Create a storage account.
:param ctx: The Cloudify ctx context.
:return: The status code of the REST request.
:rtype: int
"""
utils.validate_node_property(constants.STORAGE_ACCOUNT_KEY, ctx.node.properties)
utils.validate_node_property(constants.ACCOUNT_TYPE_KEY, ctx.node.properties)
azure_config = utils.get_azure_config(ctx)
subscription_id = azure_config[constants.SUBSCRIPTION_KEY]
resource_group_name = azure_config[constants.RESOURCE_GROUP_KEY]
location = azure_config[constants.LOCATION_KEY]
storage_account_name = ctx.node.properties[constants.STORAGE_ACCOUNT_KEY]
account_type = ctx.node.properties[constants.ACCOUNT_TYPE_KEY]
api_version = constants.AZURE_API_VERSION_05_preview
ctx.logger.info('Checking availability storage_account_name ' + str(storage_account_name))
availability = availability_account_name(ctx=ctx)
if (not bool(availability['nameAvailable'])):
if (availability['reason'] == 'AlreadyExists'):
ctx.logger.info("storage_account_name " + str(storage_account_name) + " already exist")
ctx.logger.info(str(availability['message']))
return 409
elif (availability['reason'] == 'Invalid'):
ctx.logger.info("storage_account_name " + str(storage_account_name) + " invalid name")
ctx.logger.info(str(availability['message']))
return 400
# Place the storage name in runtime_properties for relationships
ctx.instance.runtime_properties[constants.STORAGE_ACCOUNT_KEY] = storage_account_name
    json = {
"location": str(location),
"properties": {
"accountType": str(account_type)
}
}
ctx.logger.info('Creating Storage Account')
connect = connection.AzureConnectionClient()
response = connect.azure_put(ctx,
("subscriptions/{}/resourceGroups/{}/" +
"providers/Microsoft.Storage" +
"/storageAccounts/{}" +
"?api-version={}").format(
subscription_id,
resource_group_name,
storage_account_name,
api_version
),
json=json
)
utils.wait_status(ctx, 'storage')
return response.status_code
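# --- Illustrative sketch (not part of the plugin) ---
# The REST call issued by create() above boils down to a PUT of a small JSON
# document against the ARM storage endpoint. This helper just rebuilds that
# URL and body from plain parameters so the payload shape is easy to see; the
# api-version default is a placeholder, the real value comes from
# constants.AZURE_API_VERSION_05_preview.
def _example_storage_put_request(subscription_id, resource_group, account_name,
                                 location, account_type,
                                 api_version='2015-05-01-preview'):
    url = ('subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/'
           'storageAccounts/{}?api-version={}').format(
               subscription_id, resource_group, account_name, api_version)
    body = {'location': location, 'properties': {'accountType': account_type}}
    return url, body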
@operation
def delete(**_):
"""Delete a storage account.
:param ctx: The Cloudify ctx context.
:return: The status code of the REST request.
:rtype: int
"""
utils.validate_node_property(constants.STORAGE_ACCOUNT_KEY, ctx.node.properties)
utils.validate_node_property(constants.DELETABLE_KEY, ctx.node.properties)
azure_config = utils.get_azure_config(ctx)
subscription_id = azure_config[constants.SUBSCRIPTION_KEY]
resource_group_name = azure_config[constants.RESOURCE_GROUP_KEY]
storage_account_name = ctx.node.properties[constants.STORAGE_ACCOUNT_KEY]
api_version = constants.AZURE_API_VERSION_05_preview
deletable = ctx.node.properties[constants.DELETABLE_KEY]
if deletable:
        ctx.logger.info('Property deletable set to True.')
ctx.logger.info('Deleting Storage Account {}.'.format(storage_account_name))
connect = connection.AzureConnectionClient()
response = connect.azure_delete(ctx,
("subscriptions/{}/resourceGroups/{}/" +
"providers/Microsoft.Storage" +
"/storageAccounts/{}" +
"?api-version={}").format(
subscription_id,
resource_group_name,
storage_account_name,
api_version
)
)
return response.status_code
else:
        ctx.logger.info('Property deletable set to False.')
ctx.logger.info('Not deleting storage account {}.'.format(storage_account_name))
return 0
def get_provisioning_state(**_):
"""Get the provisioning state of a storage account.
:param ctx: The Cloudify ctx context.
:return: The provisioning state of a storage account.
:rtype: string
"""
utils.validate_node_property(constants.STORAGE_ACCOUNT_KEY, ctx.node.properties)
azure_config = utils.get_azure_config(ctx)
subscription_id = azure_config[constants.SUBSCRIPTION_KEY]
resource_group_name = azure_config[constants.RESOURCE_GROUP_KEY]
storage_account_name = ctx.node.properties[constants.STORAGE_ACCOUNT_KEY]
api_version = constants.AZURE_API_VERSION_05_preview
connect = connection.AzureConnectionClient()
response = connect.azure_get(ctx,
("subscriptions/{}/resourceGroups/{}/" +
"providers/Microsoft.Storage" +
"/storageAccounts/{}" +
"?api-version={}").format(
subscription_id,
resource_group_name,
storage_account_name,
api_version
)
)
jsonGet = response.json()
status_storage = jsonGet['properties']['provisioningState']
return status_storage
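# --- Illustrative sketch (not part of the plugin) ---
# get_provisioning_state() above returns the raw provisioningState string.
# A generic polling loop over such a getter (independent of utils.wait_status,
# whose implementation is not shown in this file) could look like this;
# 'Succeeded' as the terminal state is an assumption of the sketch.
def _example_wait_for_state(get_state, expected='Succeeded',
                            timeout=600, interval=10):
    import time
    deadline = time.time() + timeout
    state = get_state()
    while state != expected:
        if time.time() > deadline:
            raise RuntimeError('Timed out waiting for {}, last state: {}'.format(
                expected, state))
        time.sleep(interval)
        state = get_state()
    return state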
def availability_account_name(**_):
"""Check the availability of a storage account name.
:param ctx: The Cloudify ctx context.
:return: A dictionary with name availability and the reason.
:rtype: dictionary
"""
utils.validate_node_property(constants.STORAGE_ACCOUNT_KEY, ctx.node.properties)
azure_config = utils.get_azure_config(ctx)
subscription_id = azure_config[constants.SUBSCRIPTION_KEY]
storage_account_name = ctx.node.properties[constants.STORAGE_ACCOUNT_KEY]
api_version = constants.AZURE_API_VERSION_05_preview
    json = {
"name": str(storage_account_name),
"type": "Microsoft.Storage/storageAccounts"
}
ctx.logger.debug('JSON: {}'.format(json))
response = connection.AzureConnectionClient().azure_post(ctx,
("subscriptions/{}/" +
"providers/Microsoft.Storage" +
"/checkNameAvailability" +
"?api-version={}").format(
subscription_id,
api_version
),
json=json
)
return response.json()
def get_storage_keys(ctx):
"""Get storage account keys.
:param ctx: The Cloudify ctx context.
:return: A list of keys attached to the storage account. Keys are encoded in base64
:rtype: list
"""
azure_config = utils.get_azure_config(ctx)
subscription_id = azure_config[constants.SUBSCRIPTION_KEY]
resource_group_name = azure_config[constants.RESOURCE_GROUP_KEY]
location = azure_config[constants.LOCATION_KEY]
if constants.STORAGE_ACCOUNT_KEY in ctx.node.properties:
storage_account_name = ctx.node.properties[constants.STORAGE_ACCOUNT_KEY]
else:
storage_account_name = ctx.instance.runtime_properties[constants.STORAGE_ACCOUNT_KEY]
api_version = constants.AZURE_API_VERSION_05_preview
connect = connection.AzureConnectionClient()
ctx.logger.info("Getting storage account keys")
response = connect.azure_post(ctx,
("subscriptions/{}" +
"/resourceGroups/{}" +
"/providers/Microsoft.Storage" +
"/storageAccounts/{}" +
"/listKeys" +
"?api-version={}").format(
subscription_id,
resource_group_name,
storage_account_name,
api_version
),
json={}
)
keys = response.json()
return [keys['key1'], keys['key2']]
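# --- Illustrative sketch (not part of the plugin) ---
# get_storage_keys() above returns [key1, key2]. A typical consumer combines
# the account name with one of those keys into a storage connection string;
# the format below is the standard Azure one, but how the key is used is up
# to the caller and not dictated by this module.
def _example_connection_string(account_name, keys):
    return ('DefaultEndpointsProtocol=https;AccountName={};AccountKey={}'
            .format(account_name, keys[0]))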
| {
"content_hash": "f9b8a3c9d4a799944260f5286b94c4ee",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 99,
"avg_line_length": 34.50877192982456,
"alnum_prop": 0.6137519064565328,
"repo_name": "fastconnect/cloudify-azure-plugin",
"id": "0299321f802ce7a18095cd87313f9191a0c2c9eb",
"size": "8528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugin/storage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "235110"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from apptools.help.help_plugin.preferences_pages import *
| {
"content_hash": "a1589770fb869070e601faf73b078a8e",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 57,
"avg_line_length": 48.5,
"alnum_prop": 0.8041237113402062,
"repo_name": "enthought/etsproxy",
"id": "6d477c6c79e3dab8deee88131dbbddf1c5c90cb4",
"size": "112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/help/help_plugin/preferences_pages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
} |
"""Course explorer module."""
__author__ = 'Rahul Singal ([email protected])'
from common import users
from controllers import utils
from models import custom_modules
from models.config import ConfigProperty
from models.models import StudentProfileDAO
from modules.course_explorer import messages
from modules.course_explorer import student
GCB_ENABLE_COURSE_EXPLORER_PAGE = ConfigProperty(
'gcb_enable_course_explorer_page', bool,
messages.SITE_SETTINGS_COURSE_EXPLORER, default_value=False,
label='Course Explorer', multiline=False, validator=None)
custom_module = None
class ExplorerPageInitializer(utils.PageInitializer):
"""Page initializer for explorer page.
Allow links to the course explorer to be added
to the navbars of all course pages.
"""
@classmethod
def initialize(cls, template_values):
template_values.update(
{'show_course_explorer_tab': GCB_ENABLE_COURSE_EXPLORER_PAGE.value})
user = users.get_current_user()
if user:
profile = StudentProfileDAO.get_profile_by_user_id(
users.get_current_user().user_id())
template_values.update({'has_global_profile': profile is not None})
def register_module():
"""Registers this module in the registry."""
# set the page initializer
utils.PageInitializerService.set(ExplorerPageInitializer)
# setup routes
explorer_routes = [
('/', student.IndexPageHandler),
('/explorer', student.AllCoursesHandler),
(r'/explorer/assets/(.*)', student.AssetsHandler),
('/explorer/courses', student.RegisteredCoursesHandler),
('/explorer/profile', student.ProfileHandler)]
global custom_module # pylint: disable=global-statement
custom_module = custom_modules.Module(
'Course Explorer',
'A set of pages for delivering an online course.',
explorer_routes, [])
return custom_module
def unregister_module():
"""Unregisters this module in the registry."""
    # set the page initializer to default.
utils.PageInitializerService.set(utils.DefaultPageInitializer)
    return custom_module
| {
"content_hash": "8170361ec83fe72496565a81e1ae8673",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 80,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.7007400555041629,
"repo_name": "ram8647/gcb-mobilecsp",
"id": "5053daff75acb494f6bee0692fcdec22fc2375b0",
"size": "2760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/course_explorer/course_explorer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "122290"
},
{
"name": "HTML",
"bytes": "486625"
},
{
"name": "JavaScript",
"bytes": "620039"
},
{
"name": "Python",
"bytes": "5013996"
},
{
"name": "Shell",
"bytes": "36511"
}
],
"symlink_target": ""
} |
"""A powerful dynamic attention wrapper object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import math
import numpy as np
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import initializers
from tensorflow.python.keras import layers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.layers import base as layers_base
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
__all__ = [
"AttentionMechanism",
"AttentionWrapper",
"AttentionWrapperState",
"LuongAttention",
"BahdanauAttention",
"hardmax",
"safe_cumprod",
"monotonic_attention",
"BahdanauMonotonicAttention",
"LuongMonotonicAttention",
]
_zero_state_tensors = rnn_cell_impl._zero_state_tensors # pylint: disable=protected-access
class AttentionMechanism(object):
@property
def alignments_size(self):
raise NotImplementedError
@property
def state_size(self):
raise NotImplementedError
class _BaseAttentionMechanism(AttentionMechanism):
"""A base AttentionMechanism class providing common functionality.
Common functionality includes:
1. Storing the query and memory layers.
2. Preprocessing and storing the memory.
"""
def __init__(self,
query_layer,
memory,
probability_fn,
memory_sequence_length=None,
memory_layer=None,
check_inner_dims_defined=True,
score_mask_value=None,
custom_key_value_fn=None,
name=None):
"""Construct base AttentionMechanism class.
Args:
query_layer: Callable. Instance of `tf.compat.v1.layers.Layer`. The
layer's depth must match the depth of `memory_layer`. If `query_layer`
is not provided, the shape of `query` must match that of `memory_layer`.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
probability_fn: A `callable`. Converts the score and previous alignments
to probabilities. Its signature should be: `probabilities =
probability_fn(score, state)`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
memory_layer: Instance of `tf.compat.v1.layers.Layer` (may be None). The
layer's depth must match the depth of `query_layer`. If `memory_layer`
is not provided, the shape of `memory` must match that of `query_layer`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
custom_key_value_fn: (optional): The custom function for
computing keys and values.
name: Name to use when creating ops.
"""
if (query_layer is not None and
not isinstance(query_layer, layers_base.Layer)):
raise TypeError("query_layer is not a Layer: %s" %
type(query_layer).__name__)
if (memory_layer is not None and
not isinstance(memory_layer, layers_base.Layer)):
raise TypeError("memory_layer is not a Layer: %s" %
type(memory_layer).__name__)
self._query_layer = query_layer
self._memory_layer = memory_layer
self.dtype = memory_layer.dtype
if not callable(probability_fn):
raise TypeError("probability_fn must be callable, saw type: %s" %
type(probability_fn).__name__)
if score_mask_value is None:
score_mask_value = dtypes.as_dtype(
self._memory_layer.dtype).as_numpy_dtype(-np.inf)
self._probability_fn = lambda score, prev: ( # pylint:disable=g-long-lambda
probability_fn(
_maybe_mask_score(
score,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value), prev))
with ops.name_scope(name, "BaseAttentionMechanismInit",
nest.flatten(memory)):
self._values = _prepare_memory(
memory,
memory_sequence_length=memory_sequence_length,
check_inner_dims_defined=check_inner_dims_defined)
self._keys = (
self.memory_layer(self._values) if self.memory_layer # pylint: disable=not-callable
else self._values)
if custom_key_value_fn is not None:
self._keys, self._values = custom_key_value_fn(self._keys, self._values)
self._batch_size = (
tensor_shape.dimension_value(self._keys.shape[0]) or
array_ops.shape(self._keys)[0])
self._alignments_size = (
tensor_shape.dimension_value(self._keys.shape[1]) or
array_ops.shape(self._keys)[1])
@property
def memory_layer(self):
return self._memory_layer
@property
def query_layer(self):
return self._query_layer
@property
def values(self):
return self._values
@property
def keys(self):
return self._keys
@property
def batch_size(self):
return self._batch_size
@property
def alignments_size(self):
return self._alignments_size
@property
def state_size(self):
return self._alignments_size
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return a tensor of all zeros.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return _zero_state_tensors(max_time, batch_size, dtype)
def initial_state(self, batch_size, dtype):
"""Creates the initial state values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return the same output as initial_alignments.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A structure of all-zero tensors with shapes as described by `state_size`.
"""
return self.initial_alignments(batch_size, dtype)
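# --- Illustrative usage sketch (not part of the original module) ---
# The mechanisms defined below (BahdanauAttention, LuongAttention, ...) are
# normally paired with AttentionWrapper roughly as follows. This is a minimal
# TF1-style sketch; num_units and the wiring of encoder_outputs /
# sequence_length are placeholders chosen for illustration only.
def _example_attention_cell(encoder_outputs, sequence_length, num_units=128):
  # encoder_outputs: [batch_size, max_time, depth] memory to attend over.
  attention_mechanism = BahdanauAttention(
      num_units, encoder_outputs, memory_sequence_length=sequence_length)
  cell = rnn_cell_impl.LSTMCell(num_units)
  # attention_layer_size mixes the cell output and the attention context into
  # an attention vector of the given size at every step.
  return AttentionWrapper(
      cell, attention_mechanism, attention_layer_size=num_units)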
class _BaseAttentionMechanismV2(AttentionMechanism, layers.Layer):
"""A base AttentionMechanism class providing common functionality.
Common functionality includes:
1. Storing the query and memory layers.
2. Preprocessing and storing the memory.
  Note that this layer takes memory as its init parameter, which is an
  anti-pattern of the Keras API; the memory is kept as an init parameter for
  performance and dependency reasons. Under the hood, during `__init__()`, it
  will invoke `base_layer.__call__(memory, setup_memory=True)`. This lets
  Keras keep track of the memory tensor as the input of this layer. Once
  `__init__()` is done, the user can query the attention via
  `score = att_obj([query, state])` and use it as a normal Keras layer.
  Special attention is needed when using this class as the base layer for a
  new attention mechanism:
    1. Build() could be invoked at least twice, so make sure weights are not
      duplicated.
    2. Layer.get_weights() might return a different set of weights if the
      instance has a `query_layer`. The query_layer weights are not
      initialized until the memory is configured.
  Also note that this layer does not work with a Keras model when
  `model.compile(run_eagerly=True)`, because this layer is stateful.
  Support for that will be added in a future version.
"""
def __init__(self,
memory,
probability_fn,
query_layer=None,
memory_layer=None,
memory_sequence_length=None,
**kwargs):
"""Construct base AttentionMechanism class.
Args:
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
probability_fn: A `callable`. Converts the score and previous alignments
to probabilities. Its signature should be: `probabilities =
probability_fn(score, state)`.
query_layer: (optional): Instance of `tf.keras.Layer`. The layer's depth
must match the depth of `memory_layer`. If `query_layer` is not
provided, the shape of `query` must match that of `memory_layer`.
memory_layer: (optional): Instance of `tf.keras.Layer`. The layer's depth
must match the depth of `query_layer`. If `memory_layer` is not
provided, the shape of `memory` must match that of `query_layer`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros for
values past the respective sequence lengths.
**kwargs: Dictionary that contains other common arguments for layer
creation.
"""
if (query_layer is not None and not isinstance(query_layer, layers.Layer)):
raise TypeError("query_layer is not a Layer: %s" %
type(query_layer).__name__)
if (memory_layer is not None and
not isinstance(memory_layer, layers.Layer)):
raise TypeError("memory_layer is not a Layer: %s" %
type(memory_layer).__name__)
self.query_layer = query_layer
self.memory_layer = memory_layer
if self.memory_layer is not None and "dtype" not in kwargs:
kwargs["dtype"] = self.memory_layer.dtype
super(_BaseAttentionMechanismV2, self).__init__(**kwargs)
if not callable(probability_fn):
raise TypeError("probability_fn must be callable, saw type: %s" %
type(probability_fn).__name__)
self.probability_fn = probability_fn
self.keys = None
self.values = None
self.batch_size = None
self._memory_initialized = False
self._check_inner_dims_defined = True
self.supports_masking = True
self.score_mask_value = dtypes.as_dtype(self.dtype).as_numpy_dtype(-np.inf)
if memory is not None:
# Setup the memory by self.__call__() with memory and memory_seq_length.
# This will make the attention follow the keras convention which takes
# all the tensor inputs via __call__().
if memory_sequence_length is None:
inputs = memory
else:
inputs = [memory, memory_sequence_length]
self.values = super(_BaseAttentionMechanismV2, self).__call__(
inputs, setup_memory=True)
def build(self, input_shape):
if not self._memory_initialized:
# This is for setting up the memory, which contains memory and optional
# memory_sequence_length. Build the memory_layer with memory shape.
if self.memory_layer is not None and not self.memory_layer.built:
if isinstance(input_shape, list):
self.memory_layer.build(input_shape[0])
else:
self.memory_layer.build(input_shape)
else:
# The input_shape should be query.shape and state.shape. Use the query
# to init the query layer.
if self.query_layer is not None and not self.query_layer.built:
self.query_layer.build(input_shape[0])
def __call__(self, inputs, **kwargs):
"""Preprocess the inputs before calling `base_layer.__call__()`.
Note that there are two situations here: one for setting up the memory, and
one with the actual query and state.
1. When the memory has not been configured, we just pass all the parameters
to base_layer.__call__(), which will then invoke self.call() with the proper
inputs and allow this class to set up the memory.
2. When the memory has already been set up, the input should contain the
query and state, and optionally the processed memory. If the processed
memory is not included in the input, we append it to the inputs and give it
to base_layer.__call__(). The processed memory is the output of the first
invocation of self.__call__(). If we don't add it here, then from the Keras
perspective the graph is disconnected, since the output from the previous
call is never used.
Args:
inputs: the input tensors.
**kwargs: dict, other keyword arguments for `__call__()`.
"""
if self._memory_initialized:
if len(inputs) not in (2, 3):
raise ValueError("Expect the inputs to have 2 or 3 tensors, got %d" %
len(inputs))
if len(inputs) == 2:
# We append the calculated memory here so that the graph will be
# connected.
inputs.append(self.values)
return super(_BaseAttentionMechanismV2, self).__call__(inputs, **kwargs)
def call(self, inputs, mask=None, setup_memory=False, **kwargs):
"""Setup the memory or query the attention.
There are two cases here: one for setting up the memory, and one for querying
the attention score. `setup_memory` is the flag that indicates which mode it
is. The input list will be treated differently based on that flag.
Args:
inputs: a list of tensor that could either be `query` and `state`, or
`memory` and `memory_sequence_length`. `query` is the tensor of dtype
matching `memory` and shape `[batch_size, query_depth]`. `state` is the
tensor of dtype matching `memory` and shape `[batch_size,
alignments_size]`. (`alignments_size` is memory's `max_time`). `memory`
is the memory to query; usually the output of an RNN encoder. The tensor
should be shaped `[batch_size, max_time, ...]`. `memory_sequence_length`
(optional) is the sequence lengths for the batch entries in memory. If
provided, the memory tensor rows are masked with zeros for values past
the respective sequence lengths.
mask: optional bool tensor with shape `[batch, max_time]` for the mask of
memory. If it is not None, the corresponding item of the memory should
be filtered out during calculation.
setup_memory: boolean, whether the input is for setting up memory, or
query attention.
**kwargs: Dict, other keyword arguments for the call method.
Returns:
Either processed memory or attention score, based on `setup_memory`.
"""
if setup_memory:
if isinstance(inputs, list):
if len(inputs) not in (1, 2):
raise ValueError("Expect inputs to have 1 or 2 tensors, got %d" %
len(inputs))
memory = inputs[0]
memory_sequence_length = inputs[1] if len(inputs) == 2 else None
memory_mask = mask
else:
memory, memory_sequence_length = inputs, None
memory_mask = mask
self._setup_memory(memory, memory_sequence_length, memory_mask)
# We force self.built to False here since only the memory is initialized;
# the real query/state has not been passed to call() yet. The layer should
# be built and called again.
self.built = False
# Return the processed memory in order to create the Keras connectivity
# data for it.
return self.values
else:
if not self._memory_initialized:
raise ValueError("Cannot query the attention before the setup of "
"memory")
if len(inputs) not in (2, 3):
raise ValueError("Expect the inputs to have query, state, and optional "
"processed memory, got %d items" % len(inputs))
# Ignore the rest of the inputs and only care about the query and state
query, state = inputs[0], inputs[1]
return self._calculate_attention(query, state)
def _setup_memory(self, memory, memory_sequence_length=None,
memory_mask=None):
"""Pre-process the memory before actually query the memory.
This should only be called once at the first invocation of call().
Args:
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros for
values past the respective sequence lengths.
memory_mask: (Optional) The boolean tensor with shape `[batch_size,
max_time]`. For any value equal to False, the corresponding value in
memory should be ignored.
"""
if self._memory_initialized:
raise ValueError("The memory for the attention has already been setup.")
if memory_sequence_length is not None and memory_mask is not None:
raise ValueError("memory_sequence_length and memory_mask cannot be "
"used at same time for attention.")
with ops.name_scope(self.name, "BaseAttentionMechanismInit",
nest.flatten(memory)):
self.values = _prepare_memory(
memory,
memory_sequence_length=memory_sequence_length,
memory_mask=memory_mask,
check_inner_dims_defined=self._check_inner_dims_defined)
# Mark the value as checked, since the memory and memory mask might not be
# passed from __call__(), which does not have proper keras metadata.
# TODO(omalleyt): Remove this hack once the mask has proper keras history.
base_layer_utils.mark_checked(self.values)
if self.memory_layer is not None:
self.keys = self.memory_layer(self.values)
else:
self.keys = self.values
self.batch_size = (
tensor_shape.dimension_value(self.keys.shape[0]) or
array_ops.shape(self.keys)[0])
self._alignments_size = (
tensor_shape.dimension_value(self.keys.shape[1]) or
array_ops.shape(self.keys)[1])
if memory_mask is not None:
unwrapped_probability_fn = self.probability_fn
def _mask_probability_fn(score, prev):
return unwrapped_probability_fn(
_maybe_mask_score(
score,
memory_mask=memory_mask,
memory_sequence_length=memory_sequence_length,
score_mask_value=self.score_mask_value), prev)
self.probability_fn = _mask_probability_fn
self._memory_initialized = True
def _calculate_attention(self, query, state):
raise NotImplementedError(
"_calculate_attention need to be implemented by subclasses.")
def compute_mask(self, inputs, mask=None):
# The real inputs of the attention are the query and state; the memory
# layer's mask shouldn't be passed down. Return None for all output masks.
return None, None
def get_config(self):
config = {}
# Since the probability_fn is likely to be a wrapped function, the child
# class should preserve the original function and how it's wrapped.
if self.query_layer is not None:
config["query_layer"] = {
"class_name": self.query_layer.__class__.__name__,
"config": self.query_layer.get_config(),
}
if self.memory_layer is not None:
config["memory_layer"] = {
"class_name": self.memory_layer.__class__.__name__,
"config": self.memory_layer.get_config(),
}
# memory is a required init parameter and it's a tensor. It cannot be
# serialized to config, so we put a placeholder for it.
config["memory"] = None
base_config = super(_BaseAttentionMechanismV2, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _process_probability_fn(self, func_name):
"""Helper method to retrieve the probably function by string input."""
valid_probability_fns = {
"softmax": nn_ops.softmax,
"hardmax": hardmax,
}
if func_name not in valid_probability_fns.keys():
raise ValueError("Invalid probability function: %s, options are %s" %
(func_name, valid_probability_fns.keys()))
return valid_probability_fns[func_name]
@classmethod
def deserialize_inner_layer_from_config(cls, config, custom_objects):
"""Helper method that reconstruct the query and memory from the config.
In the get_config() method, the query and memory layer configs are
serialized into dict for persistence, this method perform the reverse action
to reconstruct the layer from the config.
Args:
config: dict, the configs that will be used to reconstruct the object.
custom_objects: dict mapping class names (or function names) of custom
(non-Keras) objects to class/functions.
Returns:
config: dict, the config with layer instances created, ready to be used as
init parameters.
"""
# Reconstruct the query and memory layer for parent class.
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
# Instead of updating the input, create a copy and use that.
config = config.copy()
query_layer_config = config.pop("query_layer", None)
if query_layer_config:
query_layer = deserialize_layer(
query_layer_config, custom_objects=custom_objects)
config["query_layer"] = query_layer
memory_layer_config = config.pop("memory_layer", None)
if memory_layer_config:
memory_layer = deserialize_layer(
memory_layer_config, custom_objects=custom_objects)
config["memory_layer"] = memory_layer
return config
@property
def alignments_size(self):
return self._alignments_size
@property
def state_size(self):
return self._alignments_size
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return a tensor of all zeros.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return _zero_state_tensors(max_time, batch_size, dtype)
def initial_state(self, batch_size, dtype):
"""Creates the initial state values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return the same output as initial_alignments.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A structure of all-zero tensors with shapes as described by `state_size`.
"""
return self.initial_alignments(batch_size, dtype)
def _luong_score(query, keys, scale):
"""Implements Luong-style (multiplicative) scoring function.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015. https://arxiv.org/abs/1508.04025
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, call this function with `scale=True`.
Args:
query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
scale: the optional tensor to scale the attention score.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
Raises:
ValueError: If `key` and `query` depths do not match.
"""
depth = query.get_shape()[-1]
key_units = keys.get_shape()[-1]
if depth != key_units:
raise ValueError(
"Incompatible or unknown inner dimensions between query and keys. "
"Query (%s) has units: %s. Keys (%s) have units: %s. "
"Perhaps you need to set num_units to the keys' dimension (%s)?" %
(query, depth, keys, key_units, key_units))
# Reshape from [batch_size, depth] to [batch_size, 1, depth]
# for matmul.
query = array_ops.expand_dims(query, 1)
# Inner product along the query units dimension.
# matmul shapes: query is [batch_size, 1, depth] and
# keys is [batch_size, max_time, depth].
# the inner product is asked to **transpose keys' inner shape** to get a
# batched matmul on:
# [batch_size, 1, depth] . [batch_size, depth, max_time]
# resulting in an output shape of:
# [batch_size, 1, max_time].
# we then squeeze out the center singleton dimension.
score = math_ops.matmul(query, keys, transpose_b=True)
score = array_ops.squeeze(score, [1])
if scale is not None:
score = scale * score
return score
class LuongAttention(_BaseAttentionMechanism):
"""Implements Luong-style (multiplicative) attention scoring.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
[Effective Approaches to Attention-based Neural Machine Translation.
EMNLP 2015.](https://arxiv.org/abs/1508.04025)
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, construct the object with parameter
`scale=True`.
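A minimal construction sketch (the encoder outputs, sequence lengths, and
decoder cell below are illustrative placeholders):
```python
# `encoder_outputs`: [batch_size, max_time, ...]; `source_lengths`: [batch_size].
attention_mechanism = LuongAttention(
    num_units=128,
    memory=encoder_outputs,
    memory_sequence_length=source_lengths,
    scale=True)  # scale=True enables the scaled form described above.
# Typically paired with AttentionWrapper (defined later in this module):
attn_cell = AttentionWrapper(
    decoder_cell, attention_mechanism, attention_layer_size=128)
```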
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
custom_key_value_fn=None,
name="LuongAttention"):
"""Construct the AttentionMechanism mechanism.
Args:
num_units: The depth of the attention mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional) Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is `tf.nn.softmax`. Other options include
`tf.contrib.seq2seq.hardmax` and `tf.contrib.sparsemax.sparsemax`.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype: The data type for the memory layer of the attention mechanism.
custom_key_value_fn: (optional): The custom function for
computing keys and values.
name: Name to use when creating ops.
"""
# For LuongAttention, we only transform the memory layer; thus
# num_units **must** match the expected query depth.
if probability_fn is None:
probability_fn = nn_ops.softmax
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(LuongAttention, self).__init__(
query_layer=None,
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
custom_key_value_fn=custom_key_value_fn,
name=name)
self._num_units = num_units
self._scale = scale
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape `[batch_size,
query_depth]`.
state: Tensor of dtype matching `self.values` and shape `[batch_size,
alignments_size]` (`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "luong_attention", [query]):
attention_g = None
if self._scale:
attention_g = variable_scope.get_variable(
"attention_g",
dtype=query.dtype,
initializer=init_ops.ones_initializer,
shape=())
score = _luong_score(query, self._keys, attention_g)
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
class LuongAttentionV2(_BaseAttentionMechanismV2):
"""Implements Luong-style (multiplicative) attention scoring.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
[Effective Approaches to Attention-based Neural Machine Translation.
EMNLP 2015.](https://arxiv.org/abs/1508.04025)
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, construct the object with parameter
`scale=True`.
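A minimal usage sketch (tensor names are illustrative):
```python
# `encoder_outputs`: [batch_size, max_time, depth] tensor from an encoder.
att = LuongAttentionV2(units=128, memory=encoder_outputs, scale=True)
# `query`: [batch_size, query_depth]; `prev_state`: [batch_size, max_time].
alignments, next_state = att([query, prev_state])
```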
"""
def __init__(self,
units,
memory,
memory_sequence_length=None,
scale=False,
probability_fn="softmax",
dtype=None,
name="LuongAttention",
**kwargs):
"""Construct the AttentionMechanism mechanism.
Args:
units: The depth of the attention mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
probability_fn: (optional) string, the name of the function used to convert
the attention score to probabilities. The default is `softmax`, which is
`tf.nn.softmax`. The other option is `hardmax`, which is hardmax() within
this module. Any other value will result in a validation error.
dtype: The data type for the memory layer of the attention mechanism.
name: Name to use when creating ops.
**kwargs: Dictionary that contains other common arguments for layer
creation.
"""
# For LuongAttention, we only transform the memory layer; thus
# num_units **must** match the expected query depth.
self.probability_fn_name = probability_fn
probability_fn = self._process_probability_fn(self.probability_fn_name)
wrapped_probability_fn = lambda score, _: probability_fn(score)
if dtype is None:
dtype = dtypes.float32
memory_layer = kwargs.pop("memory_layer", None)
if not memory_layer:
memory_layer = layers.Dense(
units, name="memory_layer", use_bias=False, dtype=dtype)
self.units = units
self.scale = scale
self.scale_weight = None
super(LuongAttentionV2, self).__init__(
memory=memory,
memory_sequence_length=memory_sequence_length,
query_layer=None,
memory_layer=memory_layer,
probability_fn=wrapped_probability_fn,
name=name,
dtype=dtype,
**kwargs)
def build(self, input_shape):
super(LuongAttentionV2, self).build(input_shape)
if self.scale and self.scale_weight is None:
self.scale_weight = self.add_weight(
"attention_g", initializer=init_ops.ones_initializer, shape=())
self.built = True
def _calculate_attention(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape `[batch_size,
query_depth]`.
state: Tensor of dtype matching `self.values` and shape `[batch_size,
alignments_size]` (`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
next_state: Same as the alignments.
"""
score = _luong_score(query, self.keys, self.scale_weight)
alignments = self.probability_fn(score, state)
next_state = alignments
return alignments, next_state
def get_config(self):
config = {
"units": self.units,
"scale": self.scale,
"probability_fn": self.probability_fn_name,
}
base_config = super(LuongAttentionV2, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
config = _BaseAttentionMechanismV2.deserialize_inner_layer_from_config(
config, custom_objects=custom_objects)
return cls(**config)
def _bahdanau_score(processed_query,
keys,
attention_v,
attention_g=None,
attention_b=None):
"""Implements Bahdanau-style (additive) scoring function.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, please pass in attention_g and attention_b.
Args:
processed_query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
attention_v: Tensor, shape `[num_units]`.
attention_g: Optional scalar tensor for normalization.
attention_b: Optional tensor with shape `[num_units]` for normalization.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
"""
# Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
processed_query = array_ops.expand_dims(processed_query, 1)
if attention_g is not None and attention_b is not None:
normed_v = attention_g * attention_v * math_ops.rsqrt(
math_ops.reduce_sum(math_ops.square(attention_v)))
return math_ops.reduce_sum(
normed_v * math_ops.tanh(keys + processed_query + attention_b), [2])
else:
return math_ops.reduce_sum(
attention_v * math_ops.tanh(keys + processed_query), [2])
class BahdanauAttention(_BaseAttentionMechanism):
"""Implements Bahdanau-style (additive) attention.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, construct the object with parameter
`normalize=True`.
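A minimal construction sketch (the encoder tensors below are illustrative):
```python
# `encoder_outputs`: [batch_size, max_time, ...]; `source_lengths`: [batch_size].
attention_mechanism = BahdanauAttention(
    num_units=256,
    memory=encoder_outputs,
    memory_sequence_length=source_lengths,
    normalize=True)  # normalize=True enables the weight-normalized form.
```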
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
custom_key_value_fn=None,
name="BahdanauAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional) Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is `tf.nn.softmax`. Other options include
`tf.contrib.seq2seq.hardmax` and `tf.contrib.sparsemax.sparsemax`.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype: The data type for the query and memory layers of the attention
mechanism.
custom_key_value_fn: (optional): The custom function for
computing keys and values.
name: Name to use when creating ops.
"""
if probability_fn is None:
probability_fn = nn_ops.softmax
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(BahdanauAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
custom_key_value_fn=custom_key_value_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._normalize = normalize
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape `[batch_size,
query_depth]`.
state: Tensor of dtype matching `self.values` and shape `[batch_size,
alignments_size]` (`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "bahdanau_attention", [query]):
processed_query = self.query_layer(query) if self.query_layer else query
attention_v = variable_scope.get_variable(
"attention_v", [self._num_units], dtype=query.dtype)
if not self._normalize:
attention_g = None
attention_b = None
else:
attention_g = variable_scope.get_variable(
"attention_g",
dtype=query.dtype,
initializer=init_ops.constant_initializer(
math.sqrt((1. / self._num_units))),
shape=())
attention_b = variable_scope.get_variable(
"attention_b", [self._num_units],
dtype=query.dtype,
initializer=init_ops.zeros_initializer())
score = _bahdanau_score(
processed_query,
self._keys,
attention_v,
attention_g=attention_g,
attention_b=attention_b)
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
class BahdanauAttentionV2(_BaseAttentionMechanismV2):
"""Implements Bahdanau-style (additive) attention.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, construct the object with parameter
`normalize=True`.
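A minimal usage sketch (tensor names are illustrative):
```python
att = BahdanauAttentionV2(
    units=256, memory=encoder_outputs, normalize=True,
    kernel_initializer="glorot_uniform")
alignments, next_state = att([query, prev_state])
```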
"""
def __init__(self,
units,
memory,
memory_sequence_length=None,
normalize=False,
probability_fn="softmax",
kernel_initializer="glorot_uniform",
dtype=None,
name="BahdanauAttention",
**kwargs):
"""Construct the Attention mechanism.
Args:
units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
probability_fn: (optional) string, the name of the function used to convert
the attention score to probabilities. The default is `softmax`, which is
`tf.nn.softmax`. The other option is `hardmax`, which is hardmax() within
this module. Any other value will result in a validation error.
kernel_initializer: (optional), the name of the initializer for the
attention kernel.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
**kwargs: Dictionary that contains other common arguments for layer
creation.
"""
self.probability_fn_name = probability_fn
probability_fn = self._process_probability_fn(self.probability_fn_name)
wrapped_probability_fn = lambda score, _: probability_fn(score)
if dtype is None:
dtype = dtypes.float32
query_layer = kwargs.pop("query_layer", None)
if not query_layer:
query_layer = layers.Dense(
units, name="query_layer", use_bias=False, dtype=dtype)
memory_layer = kwargs.pop("memory_layer", None)
if not memory_layer:
memory_layer = layers.Dense(
units, name="memory_layer", use_bias=False, dtype=dtype)
self.units = units
self.normalize = normalize
self.kernel_initializer = initializers.get(kernel_initializer)
self.attention_v = None
self.attention_g = None
self.attention_b = None
super(BahdanauAttentionV2, self).__init__(
memory=memory,
memory_sequence_length=memory_sequence_length,
query_layer=query_layer,
memory_layer=memory_layer,
probability_fn=wrapped_probability_fn,
name=name,
dtype=dtype,
**kwargs)
def build(self, input_shape):
super(BahdanauAttentionV2, self).build(input_shape)
if self.attention_v is None:
self.attention_v = self.add_weight(
"attention_v", [self.units],
dtype=self.dtype,
initializer=self.kernel_initializer)
if self.normalize and self.attention_g is None and self.attention_b is None:
self.attention_g = self.add_weight(
"attention_g",
initializer=init_ops.constant_initializer(
math.sqrt((1. / self.units))),
shape=())
self.attention_b = self.add_weight(
"attention_b",
shape=[self.units],
initializer=init_ops.zeros_initializer())
self.built = True
def _calculate_attention(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape `[batch_size,
query_depth]`.
state: Tensor of dtype matching `self.values` and shape `[batch_size,
alignments_size]` (`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
next_state: same as alignments.
"""
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(
processed_query,
self.keys,
self.attention_v,
attention_g=self.attention_g,
attention_b=self.attention_b)
alignments = self.probability_fn(score, state)
next_state = alignments
return alignments, next_state
def get_config(self):
config = {
"units": self.units,
"normalize": self.normalize,
"probability_fn": self.probability_fn_name,
"kernel_initializer": initializers.serialize(self.kernel_initializer)
}
base_config = super(BahdanauAttentionV2, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
config = _BaseAttentionMechanismV2.deserialize_inner_layer_from_config(
config, custom_objects=custom_objects)
return cls(**config)
def safe_cumprod(x, *args, **kwargs):
"""Computes cumprod of x in logspace using cumsum to avoid underflow.
The cumprod function and its gradient can result in numerical instabilities
when its argument has very small and/or zero values. As long as the argument
is all positive, we can instead compute the cumulative product as
exp(cumsum(log(x))). This function can be called identically to tf.cumprod.
Args:
x: Tensor to take the cumulative product of.
*args: Passed on to cumsum; these are identical to those in cumprod.
**kwargs: Passed on to cumsum; these are identical to those in cumprod.
Returns:
Cumulative product of x.
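For example (approximate values; extra arguments are forwarded to cumsum):
```python
safe_cumprod([0.5, 0.5, 0.5], axis=0)                  # ~ [0.5, 0.25, 0.125]
safe_cumprod([0.5, 0.5, 0.5], axis=0, exclusive=True)  # ~ [1.0, 0.5, 0.25]
```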
"""
with ops.name_scope(None, "SafeCumprod", [x]):
x = ops.convert_to_tensor(x, name="x")
tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
return math_ops.exp(
math_ops.cumsum(
math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs))
def monotonic_attention(p_choose_i, previous_attention, mode):
"""Compute monotonic attention distribution from choosing probabilities.
Monotonic attention implies that the input sequence is processed in an
explicitly left-to-right manner when generating the output sequence. In
addition, once an input sequence element is attended to at a given output
timestep, elements occurring before it cannot be attended to at subsequent
output timesteps. This function generates attention distributions according
to these assumptions. For more information, see `Online and Linear-Time
Attention by Enforcing Monotonic Alignments`.
Args:
p_choose_i: Probability of choosing input sequence/memory element i. Should
be of shape (batch_size, input_sequence_length), and should all be in the
range [0, 1].
previous_attention: The attention distribution from the previous output
timestep. Should be of shape (batch_size, input_sequence_length). For
the first output timestep, previous_attention[n] should be [1, 0, 0, ...,
0] for all n in [0, ... batch_size - 1].
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. * 'recursive' uses tf.scan to
recursively compute the distribution. This is slowest but is exact,
general, and does not suffer from numerical instabilities. * 'parallel'
uses parallelized cumulative-sum and cumulative-product operations to
compute a closed-form solution to the recurrence relation defining the
attention distribution. This makes it more efficient than 'recursive',
but it requires numerical checks which make the distribution non-exact.
This can be a problem in particular when input_sequence_length is long
and/or p_choose_i has entries very close to 0 or 1. * 'hard' requires that
the probabilities in p_choose_i are all either 0 or 1, and subsequently
uses a more efficient and exact solution.
Returns:
A tensor of shape (batch_size, input_sequence_length) representing the
attention distributions for each sequence in the batch.
Raises:
ValueError: mode is not one of 'recursive', 'parallel', 'hard'.
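For example, in 'hard' mode with a one-hot previous attention, the attention
moves to the first memory entry at or after the previously attended one whose
choosing probability is 1 (the values below are illustrative):
```python
p_choose_i = [[0., 1., 1., 0.]]
previous_attention = [[1., 0., 0., 0.]]
monotonic_attention(p_choose_i, previous_attention, "hard")
# ~> [[0., 1., 0., 0.]]
```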
"""
# Force things to be tensors
p_choose_i = ops.convert_to_tensor(p_choose_i, name="p_choose_i")
previous_attention = ops.convert_to_tensor(
previous_attention, name="previous_attention")
if mode == "recursive":
# Use .shape[0] when it's not None, or fall back on symbolic shape
batch_size = tensor_shape.dimension_value(
p_choose_i.shape[0]) or array_ops.shape(p_choose_i)[0]
# Compute [1, 1 - p_choose_i[0], 1 - p_choose_i[1], ..., 1 - p_choose_i[-2]]
shifted_1mp_choose_i = array_ops.concat(
[array_ops.ones((batch_size, 1)), 1 - p_choose_i[:, :-1]], 1)
# Compute attention distribution recursively as
# q[i] = (1 - p_choose_i[i - 1])*q[i - 1] + previous_attention[i]
# attention[i] = p_choose_i[i]*q[i]
attention = p_choose_i * array_ops.transpose(
functional_ops.scan(
# Need to use reshape to remind TF of the shape between loop iterations
lambda x, yz: array_ops.reshape(yz[0] * x + yz[1], (batch_size,)),
# Loop variables yz[0] and yz[1]
[
array_ops.transpose(shifted_1mp_choose_i),
array_ops.transpose(previous_attention)
],
# Initial value of x is just zeros
array_ops.zeros((batch_size,))))
elif mode == "parallel":
# safe_cumprod computes cumprod in logspace with numeric checks
cumprod_1mp_choose_i = safe_cumprod(1 - p_choose_i, axis=1, exclusive=True)
# Compute recurrence relation solution
attention = p_choose_i * cumprod_1mp_choose_i * math_ops.cumsum(
previous_attention /
# Clip cumprod_1mp to avoid divide-by-zero
clip_ops.clip_by_value(cumprod_1mp_choose_i, 1e-10, 1.),
axis=1)
elif mode == "hard":
# Remove any probabilities before the index chosen last time step
p_choose_i *= math_ops.cumsum(previous_attention, axis=1)
# Now, use exclusive cumprod to remove probabilities after the first
# chosen index, like so:
# p_choose_i = [0, 0, 0, 1, 1, 0, 1, 1]
# cumprod(1 - p_choose_i, exclusive=True) = [1, 1, 1, 1, 0, 0, 0, 0]
# Product of above: [0, 0, 0, 1, 0, 0, 0, 0]
attention = p_choose_i * math_ops.cumprod(
1 - p_choose_i, axis=1, exclusive=True)
else:
raise ValueError("mode must be 'recursive', 'parallel', or 'hard'.")
return attention
def _monotonic_probability_fn(score,
previous_alignments,
sigmoid_noise,
mode,
seed=None):
"""Attention probability function for monotonic attention.
Takes in unnormalized attention scores, adds pre-sigmoid noise to encourage
the model to make discrete attention decisions, passes them through a sigmoid
to obtain "choosing" probabilities, and then calls monotonic_attention to
obtain the attention distribution. For more information, see
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
Args:
score: Unnormalized attention scores, shape `[batch_size, alignments_size]`
previous_alignments: Previous attention distribution, shape `[batch_size,
alignments_size]`
sigmoid_noise: Standard deviation of pre-sigmoid noise. Setting this larger
than 0 will encourage the model to produce large attention scores,
effectively making the choosing probabilities discrete and the resulting
attention distribution one-hot. It should be set to 0 at test-time, and
when hard attention is not desired.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
seed: (optional) Random seed for pre-sigmoid noise.
Returns:
A `[batch_size, alignments_size]`-shape tensor corresponding to the
resulting attention distribution.
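A sketch of how this function is typically wired in as a `probability_fn`
(this mirrors what the monotonic attention classes below do):
```python
wrapped_probability_fn = functools.partial(
    _monotonic_probability_fn, sigmoid_noise=0., mode="parallel", seed=None)
# wrapped_probability_fn(score, previous_alignments) now yields monotonic
# attention distributions.
```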
"""
# Optionally add pre-sigmoid noise to the scores
if sigmoid_noise > 0:
noise = random_ops.random_normal(
array_ops.shape(score), dtype=score.dtype, seed=seed)
score += sigmoid_noise * noise
# Compute "choosing" probabilities from the attention scores
if mode == "hard":
# When mode is hard, use a hard sigmoid
p_choose_i = math_ops.cast(score > 0, score.dtype)
else:
p_choose_i = math_ops.sigmoid(score)
# Convert from choosing probabilities to attention distribution
return monotonic_attention(p_choose_i, previous_alignments, mode)
class _BaseMonotonicAttentionMechanism(_BaseAttentionMechanism):
"""Base attention mechanism for monotonic attention.
Simply overrides the initial_alignments function to provide a dirac
distribution, which is needed in order for the monotonic attention
distributions to have the correct behavior.
"""
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the monotonic attentions.
Initializes to dirac distributions, i.e. [1, 0, 0, ...memory length..., 0]
for all entries in the batch.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return array_ops.one_hot(
array_ops.zeros((batch_size,), dtype=dtypes.int32),
max_time,
dtype=dtype)
class _BaseMonotonicAttentionMechanismV2(_BaseAttentionMechanismV2):
"""Base attention mechanism for monotonic attention.
Simply overrides the initial_alignments function to provide a dirac
distribution, which is needed in order for the monotonic attention
distributions to have the correct behavior.
"""
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the monotonic attentions.
Initializes to dirac distributions, i.e. [1, 0, 0, ...memory length..., 0]
for all entries in the batch.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return array_ops.one_hot(
array_ops.zeros((batch_size,), dtype=dtypes.int32),
max_time,
dtype=dtype)
class BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Bahadanau-style energy function.
This type of attention enforces a monotonic constraint on the attention
distributions; that is once the model attends to a given point in the memory
it can't attend to any prior points at subsequence output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Since the attention scores are passed
through a sigmoid, a learnable scalar bias parameter is applied after the
score function and before the sigmoid. Otherwise, it is equivalent to
BahdanauAttention. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
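A minimal construction sketch (the encoder tensor and the specific values
below are illustrative; sigmoid_noise is typically > 0 only during training):
```python
attention_mechanism = BahdanauMonotonicAttention(
    num_units=256,
    memory=encoder_outputs,
    sigmoid_noise=1.0,      # encourage discrete choices while training
    score_bias_init=-4.0,   # illustrative negative init; see score_bias_init
    mode="parallel")
```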
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
score_mask_value=None,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
dtype=None,
name="BahdanauMonotonicAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn,
sigmoid_noise=sigmoid_noise,
mode=mode,
seed=sigmoid_noise_seed)
super(BahdanauMonotonicAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._normalize = normalize
self._name = name
self._score_bias_init = score_bias_init
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape `[batch_size,
query_depth]`.
state: Tensor of dtype matching `self.values` and shape `[batch_size,
alignments_size]` (`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "bahdanau_monotonic_attention",
[query]):
processed_query = self.query_layer(query) if self.query_layer else query
attention_v = variable_scope.get_variable(
"attention_v", [self._num_units], dtype=query.dtype)
if not self._normalize:
attention_g = None
attention_b = None
else:
attention_g = variable_scope.get_variable(
"attention_g",
dtype=query.dtype,
initializer=init_ops.constant_initializer(
math.sqrt((1. / self._num_units))),
shape=())
attention_b = variable_scope.get_variable(
"attention_b", [self._num_units],
dtype=query.dtype,
initializer=init_ops.zeros_initializer())
score = _bahdanau_score(
processed_query,
self._keys,
attention_v,
attention_g=attention_g,
attention_b=attention_b)
score_bias = variable_scope.get_variable(
"attention_score_bias",
dtype=processed_query.dtype,
initializer=self._score_bias_init)
score += score_bias
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
class BahdanauMonotonicAttentionV2(_BaseMonotonicAttentionMechanismV2):
"""Monotonic attention mechanism with Bahadanau-style energy function.
This type of attention enforces a monotonic constraint on the attention
distributions; that is once the model attends to a given point in the memory
it can't attend to any prior points at subsequence output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Since the attention scores are passed
through a sigmoid, a learnable scalar bias parameter is applied after the
score function and before the sigmoid. Otherwise, it is equivalent to
BahdanauAttention. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
"""
def __init__(self,
units,
memory,
memory_sequence_length=None,
normalize=False,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
kernel_initializer="glorot_uniform",
dtype=None,
name="BahdanauMonotonicAttention",
**kwargs):
"""Construct the Attention mechanism.
Args:
units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
kernel_initializer: (optional), the name of the initializer for the
attention kernel.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
**kwargs: Dictionary that contains other common arguments for layer
creation.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn,
sigmoid_noise=sigmoid_noise,
mode=mode,
seed=sigmoid_noise_seed)
query_layer = kwargs.pop("query_layer", None)
if not query_layer:
query_layer = layers.Dense(
units, name="query_layer", use_bias=False, dtype=dtype)
memory_layer = kwargs.pop("memory_layer", None)
if not memory_layer:
memory_layer = layers.Dense(
units, name="memory_layer", use_bias=False, dtype=dtype)
self.units = units
self.normalize = normalize
self.sigmoid_noise = sigmoid_noise
self.sigmoid_noise_seed = sigmoid_noise_seed
self.score_bias_init = score_bias_init
self.mode = mode
self.kernel_initializer = initializers.get(kernel_initializer)
self.attention_v = None
self.attention_score_bias = None
self.attention_g = None
self.attention_b = None
super(BahdanauMonotonicAttentionV2, self).__init__(
memory=memory,
memory_sequence_length=memory_sequence_length,
query_layer=query_layer,
memory_layer=memory_layer,
probability_fn=wrapped_probability_fn,
name=name,
dtype=dtype,
**kwargs)
def build(self, input_shape):
super(BahdanauMonotonicAttentionV2, self).build(input_shape)
if self.attention_v is None:
self.attention_v = self.add_weight(
"attention_v", [self.units],
dtype=self.dtype,
initializer=self.kernel_initializer)
if self.attention_score_bias is None:
self.attention_score_bias = self.add_weight(
"attention_score_bias",
shape=(),
dtype=self.dtype,
initializer=init_ops.constant_initializer(
self.score_bias_init, dtype=self.dtype))
if self.normalize and self.attention_g is None and self.attention_b is None:
self.attention_g = self.add_weight(
"attention_g",
dtype=self.dtype,
initializer=init_ops.constant_initializer(
math.sqrt((1. / self.units))),
shape=())
self.attention_b = self.add_weight(
"attention_b", [self.units],
dtype=self.dtype,
initializer=init_ops.zeros_initializer())
self.built = True
def _calculate_attention(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape `[batch_size,
query_depth]`.
state: Tensor of dtype matching `self.values` and shape `[batch_size,
alignments_size]` (`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(
processed_query,
self.keys,
self.attention_v,
attention_g=self.attention_g,
attention_b=self.attention_b)
score += self.attention_score_bias
alignments = self.probability_fn(score, state)
next_state = alignments
return alignments, next_state
def get_config(self):
config = {
"units": self.units,
"normalize": self.normalize,
"sigmoid_noise": self.sigmoid_noise,
"sigmoid_noise_seed": self.sigmoid_noise_seed,
"score_bias_init": self.score_bias_init,
"mode": self.mode,
"kernel_initializer": initializers.serialize(self.kernel_initializer),
}
base_config = super(BahdanauMonotonicAttentionV2, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
config = _BaseAttentionMechanismV2.deserialize_inner_layer_from_config(
config, custom_objects=custom_objects)
return cls(**config)
class LuongMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Luong-style energy function.
This type of attention enforces a monotonic constraint on the attention
distributions; that is, once the model attends to a given point in the memory
it can't attend to any prior points at subsequent output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Otherwise, it is equivalent to
LuongAttention. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
score_mask_value=None,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
dtype=None,
name="LuongMonotonicAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn,
sigmoid_noise=sigmoid_noise,
mode=mode,
seed=sigmoid_noise_seed)
super(LuongMonotonicAttention, self).__init__(
query_layer=None,
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._scale = scale
self._score_bias_init = score_bias_init
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape `[batch_size,
query_depth]`.
state: Tensor of dtype matching `self.values` and shape `[batch_size,
alignments_size]` (`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "luong_monotonic_attention",
[query]):
attention_g = None
if self._scale:
attention_g = variable_scope.get_variable(
"attention_g",
dtype=query.dtype,
initializer=init_ops.ones_initializer,
shape=())
score = _luong_score(query, self._keys, attention_g)
score_bias = variable_scope.get_variable(
"attention_score_bias",
dtype=query.dtype,
initializer=self._score_bias_init)
score += score_bias
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
class LuongMonotonicAttentionV2(_BaseMonotonicAttentionMechanismV2):
"""Monotonic attention mechanism with Luong-style energy function.
This type of attention enforces a monotonic constraint on the attention
distributions; that is, once the model attends to a given point in the memory
it can't attend to any prior points at subsequent output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Otherwise, it is equivalent to
LuongAttention. This approach is proposed in
[Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017.](https://arxiv.org/abs/1704.00784)
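A minimal usage sketch (tensor names are illustrative; per
`_monotonic_probability_fn`, set `sigmoid_noise=0` at inference time):
```python
att = LuongMonotonicAttentionV2(
    units=128, memory=encoder_outputs, sigmoid_noise=1.0, mode="parallel")
alignments, next_state = att([query, prev_state])
```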
"""
def __init__(self,
units,
memory,
memory_sequence_length=None,
scale=False,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
dtype=None,
name="LuongMonotonicAttention",
**kwargs):
"""Construct the Attention mechanism.
Args:
units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
**kwargs: Dictionary that contains other common arguments for layer
creation.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn,
sigmoid_noise=sigmoid_noise,
mode=mode,
seed=sigmoid_noise_seed)
memory_layer = kwargs.pop("memory_layer", None)
if not memory_layer:
memory_layer = layers.Dense(
units, name="memory_layer", use_bias=False, dtype=dtype)
self.units = units
self.scale = scale
self.sigmoid_noise = sigmoid_noise
self.sigmoid_noise_seed = sigmoid_noise_seed
self.score_bias_init = score_bias_init
self.mode = mode
self.attention_g = None
self.attention_score_bias = None
super(LuongMonotonicAttentionV2, self).__init__(
memory=memory,
memory_sequence_length=memory_sequence_length,
query_layer=None,
memory_layer=memory_layer,
probability_fn=wrapped_probability_fn,
name=name,
dtype=dtype,
**kwargs)
def build(self, input_shape):
super(LuongMonotonicAttentionV2, self).build(input_shape)
if self.scale and self.attention_g is None:
self.attention_g = self.add_weight(
"attention_g", initializer=init_ops.ones_initializer, shape=())
if self.attention_score_bias is None:
self.attention_score_bias = self.add_weight(
"attention_score_bias",
shape=(),
initializer=init_ops.constant_initializer(
self.score_bias_init, dtype=self.dtype))
self.built = True
def _calculate_attention(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape `[batch_size,
query_depth]`.
state: Tensor of dtype matching `self.values` and shape `[batch_size,
alignments_size]` (`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
next_state: Same as alignments
"""
score = _luong_score(query, self.keys, self.attention_g)
score += self.attention_score_bias
alignments = self.probability_fn(score, state)
next_state = alignments
return alignments, next_state
def get_config(self):
config = {
"units": self.units,
"scale": self.scale,
"sigmoid_noise": self.sigmoid_noise,
"sigmoid_noise_seed": self.sigmoid_noise_seed,
"score_bias_init": self.score_bias_init,
"mode": self.mode,
}
base_config = super(LuongMonotonicAttentionV2, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
config = _BaseAttentionMechanismV2.deserialize_inner_layer_from_config(
config, custom_objects=custom_objects)
return cls(**config)
class AttentionWrapperState(
collections.namedtuple("AttentionWrapperState",
("cell_state", "attention", "time", "alignments",
"alignment_history", "attention_state"))):
"""`namedtuple` storing the state of a `AttentionWrapper`.
Contains:
- `cell_state`: The state of the wrapped `RNNCell` at the previous time
step.
- `attention`: The attention emitted at the previous time step.
- `time`: int32 scalar containing the current time step.
- `alignments`: A single or tuple of `Tensor`(s) containing the alignments
emitted at the previous time step for each attention mechanism.
- `alignment_history`: (if enabled) a single or tuple of `TensorArray`(s)
containing alignment matrices from all time steps for each attention
mechanism. Call `stack()` on each to convert to a `Tensor`.
- `attention_state`: A single or tuple of nested objects
containing attention mechanism state for each attention mechanism.
The objects may contain Tensors or TensorArrays.
"""
def clone(self, **kwargs):
"""Clone this object, overriding components provided by kwargs.
The new state fields' shape must match original state fields' shape. This
will be validated, and original fields' shape will be propagated to new
fields.
Example:
```python
initial_state = attention_wrapper.zero_state(dtype=..., batch_size=...)
initial_state = initial_state.clone(cell_state=encoder_state)
```
Args:
**kwargs: Any properties of the state object to replace in the returned
`AttentionWrapperState`.
Returns:
A new `AttentionWrapperState` whose properties are the same as
this one, except any overridden properties as provided in `kwargs`.
"""
def with_same_shape(old, new):
"""Check and set new tensor's shape."""
if isinstance(old, ops.Tensor) and isinstance(new, ops.Tensor):
if not context.executing_eagerly():
return tensor_util.with_same_shape(old, new)
else:
if old.shape.as_list() != new.shape.as_list():
raise ValueError("The shape of the AttentionWrapperState is "
"expected to be same as the one to clone. "
"self.shape: %s, input.shape: %s" %
(old.shape, new.shape))
return new
return new
return nest.map_structure(
with_same_shape, self,
super(AttentionWrapperState, self)._replace(**kwargs))
def _prepare_memory(memory,
memory_sequence_length=None,
memory_mask=None,
check_inner_dims_defined=True):
"""Convert to tensor and possibly mask `memory`.
Args:
memory: `Tensor`, shaped `[batch_size, max_time, ...]`.
memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`.
memory_mask: `boolean` tensor with shape [batch_size, max_time]. The memory
should be skipped when the corresponding mask is False.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost dimensions
are fully defined.
Returns:
A (possibly masked), checked, new `memory`.
Raises:
ValueError: If `check_inner_dims_defined` is `True` and not
`memory.shape[2:].is_fully_defined()`.
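  For example (hypothetical values): with `memory` of shape `[2, 3, depth]` and
  `memory_sequence_length=[1, 2]`, time steps 2-3 of the first batch entry and
  time step 3 of the second are zeroed out.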
"""
memory = nest.map_structure(lambda m: ops.convert_to_tensor(m, name="memory"),
memory)
if memory_sequence_length is not None and memory_mask is not None:
raise ValueError("memory_sequence_length and memory_mask can't be provided "
"at same time.")
if memory_sequence_length is not None:
memory_sequence_length = ops.convert_to_tensor(
memory_sequence_length, name="memory_sequence_length")
if check_inner_dims_defined:
def _check_dims(m):
if not m.get_shape()[2:].is_fully_defined():
raise ValueError("Expected memory %s to have fully defined inner dims, "
"but saw shape: %s" % (m.name, m.get_shape()))
nest.map_structure(_check_dims, memory)
if memory_sequence_length is None and memory_mask is None:
return memory
elif memory_sequence_length is not None:
seq_len_mask = array_ops.sequence_mask(
memory_sequence_length,
maxlen=array_ops.shape(nest.flatten(memory)[0])[1],
dtype=nest.flatten(memory)[0].dtype)
else:
    # Otherwise, memory_mask is not None.
seq_len_mask = math_ops.cast(
memory_mask, dtype=nest.flatten(memory)[0].dtype)
def _maybe_mask(m, seq_len_mask):
"""Mask the memory based on the memory mask."""
rank = m.get_shape().ndims
rank = rank if rank is not None else array_ops.rank(m)
extra_ones = array_ops.ones(rank - 2, dtype=dtypes.int32)
seq_len_mask = array_ops.reshape(
seq_len_mask,
array_ops.concat((array_ops.shape(seq_len_mask), extra_ones), 0))
return m * seq_len_mask
return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory)
def _maybe_mask_score(score,
memory_sequence_length=None,
memory_mask=None,
score_mask_value=None):
"""Mask the attention score based on the masks."""
if memory_sequence_length is None and memory_mask is None:
return score
if memory_sequence_length is not None and memory_mask is not None:
raise ValueError("memory_sequence_length and memory_mask can't be provided "
"at same time.")
if memory_sequence_length is not None:
message = "All values in memory_sequence_length must be greater than zero."
with ops.control_dependencies(
[check_ops.assert_positive(memory_sequence_length, message=message)]):
memory_mask = array_ops.sequence_mask(
memory_sequence_length, maxlen=array_ops.shape(score)[1])
score_mask_values = score_mask_value * array_ops.ones_like(score)
return array_ops.where(memory_mask, score, score_mask_values)
def hardmax(logits, name=None):
"""Returns batched one-hot vectors.
The depth index containing the `1` is that of the maximum logit value.
Args:
logits: A batch tensor of logit values.
name: Name to use when creating ops.
Returns:
A batched one-hot tensor.
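  For example (hypothetical values), `hardmax([[0.5, 2.0, 1.0]])` returns
  `[[0., 1., 0.]]`, since index 1 holds the maximum logit.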
"""
with ops.name_scope(name, "Hardmax", [logits]):
logits = ops.convert_to_tensor(logits, name="logits")
if tensor_shape.dimension_value(logits.get_shape()[-1]) is not None:
depth = tensor_shape.dimension_value(logits.get_shape()[-1])
else:
depth = array_ops.shape(logits)[-1]
return array_ops.one_hot(
math_ops.argmax(logits, -1), depth, dtype=logits.dtype)
def _compute_attention(attention_mechanism, cell_output, attention_state,
attention_layer):
"""Computes the attention and alignments for a given attention_mechanism."""
if isinstance(attention_mechanism, _BaseAttentionMechanismV2):
alignments, next_attention_state = attention_mechanism(
[cell_output, attention_state])
else:
    # For other classes, assume they follow _BaseAttentionMechanism, which
    # takes query and state as separate parameters.
alignments, next_attention_state = attention_mechanism(
cell_output, state=attention_state)
# Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]
expanded_alignments = array_ops.expand_dims(alignments, 1)
# Context is the inner product of alignments and values along the
# memory time dimension.
# alignments shape is
# [batch_size, 1, memory_time]
# attention_mechanism.values shape is
# [batch_size, memory_time, memory_size]
# the batched matmul is over memory_time, so the output shape is
# [batch_size, 1, memory_size].
# we then squeeze out the singleton dim.
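  # As a hypothetical shape check: with batch_size=32, memory_time=50 and
  # memory_size=256, [32, 1, 50] matmul [32, 50, 256] gives [32, 1, 256],
  # which is squeezed to [32, 256].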
context_ = math_ops.matmul(expanded_alignments, attention_mechanism.values)
context_ = array_ops.squeeze(context_, [1])
if attention_layer is not None:
attention = attention_layer(array_ops.concat([cell_output, context_], 1))
else:
attention = context_
return attention, alignments, next_attention_state
class AttentionWrapper(rnn_cell_impl.RNNCell):
"""Wraps another `RNNCell` with attention."""
def __init__(self,
cell,
attention_mechanism,
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
output_attention=True,
initial_cell_state=None,
name=None,
attention_layer=None,
attention_fn=None,
dtype=None):
"""Construct the `AttentionWrapper`.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
`tf.contrib.seq2seq.tile_batch` (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
    tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
memory=tiled_inputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
Args:
cell: An instance of `RNNCell`.
attention_mechanism: A list of `AttentionMechanism` instances or a single
instance.
attention_layer_size: A list of Python integers or a single Python
integer, the depth of the attention (output) layer(s). If None
(default), use the context as attention at each time step. Otherwise,
feed the context and cell output into the attention layer to generate
attention at each time step. If attention_mechanism is a list,
attention_layer_size must be a list of the same length. If
attention_layer is set, this must be None. If attention_fn is set, it
        must be guaranteed that the outputs of attention_fn also meet the above
requirements.
alignment_history: Python boolean, whether to store alignment history from
all time steps in the final output state (currently stored as a time
major `TensorArray` on which you must call `stack()`).
cell_input_fn: (optional) A `callable`. The default is:
`lambda inputs, attention: array_ops.concat([inputs, attention], -1)`.
output_attention: Python bool. If `True` (default), the output at each
time step is the attention value. This is the behavior of Luong-style
attention mechanisms. If `False`, the output at each time step is the
        output of `cell`. This is the behavior of Bahdanau-style attention
mechanisms. In both cases, the `attention` tensor is propagated to the
next time step via the state and is used there. This flag only controls
whether the attention mechanism is propagated up to the next cell in an
RNN stack or to the top RNN output.
initial_cell_state: The initial state value to use for the cell when the
user calls `zero_state()`. Note that if this value is provided now, and
the user uses a `batch_size` argument of `zero_state` which does not
match the batch size of `initial_cell_state`, proper behavior is not
guaranteed.
name: Name to use when creating ops.
attention_layer: A list of `tf.compat.v1.layers.Layer` instances or a
single `tf.compat.v1.layers.Layer` instance taking the context and cell
output as inputs to generate attention at each time step. If None
(default), use the context as attention at each time step. If
attention_mechanism is a list, attention_layer must be a list of the
        same length. If attention_layer_size is set, this must be None.
attention_fn: An optional callable function that allows users to provide
their own customized attention function, which takes input
(attention_mechanism, cell_output, attention_state, attention_layer) and
outputs (attention, alignments, next_attention_state). If provided, the
attention_layer_size should be the size of the outputs of attention_fn.
dtype: The cell dtype
Raises:
TypeError: `attention_layer_size` is not None and (`attention_mechanism`
is a list but `attention_layer_size` is not; or vice versa).
ValueError: if `attention_layer_size` is not None, `attention_mechanism`
is a list, and its length does not match that of `attention_layer_size`;
if `attention_layer_size` and `attention_layer` are set simultaneously.
"""
super(AttentionWrapper, self).__init__(name=name, dtype=dtype)
rnn_cell_impl.assert_like_rnncell("cell", cell)
if isinstance(attention_mechanism, (list, tuple)):
self._is_multi = True
attention_mechanisms = attention_mechanism
for attention_mechanism in attention_mechanisms:
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError("attention_mechanism must contain only instances of "
"AttentionMechanism, saw type: %s" %
type(attention_mechanism).__name__)
else:
self._is_multi = False
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must be an AttentionMechanism or list of "
"multiple AttentionMechanism instances, saw type: %s" %
type(attention_mechanism).__name__)
attention_mechanisms = (attention_mechanism,)
if cell_input_fn is None:
cell_input_fn = (
lambda inputs, attention: array_ops.concat([inputs, attention], -1))
else:
if not callable(cell_input_fn):
raise TypeError("cell_input_fn must be callable, saw type: %s" %
type(cell_input_fn).__name__)
if attention_layer_size is not None and attention_layer is not None:
raise ValueError("Only one of attention_layer_size and attention_layer "
"should be set")
if attention_layer_size is not None:
attention_layer_sizes = tuple(
attention_layer_size if isinstance(attention_layer_size, (
list, tuple)) else (attention_layer_size,))
if len(attention_layer_sizes) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer_size must contain exactly one "
"integer per attention_mechanism, saw: %d vs %d" %
(len(attention_layer_sizes), len(attention_mechanisms)))
self._attention_layers = tuple(
layers_core.Dense(
attention_layer_size,
name="attention_layer",
use_bias=False,
dtype=attention_mechanisms[i].dtype)
for i, attention_layer_size in enumerate(attention_layer_sizes))
self._attention_layer_size = sum(attention_layer_sizes)
elif attention_layer is not None:
self._attention_layers = tuple(
attention_layer if isinstance(attention_layer, (list, tuple)) else (
attention_layer,))
if len(self._attention_layers) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer must contain exactly one "
"layer per attention_mechanism, saw: %d vs %d" %
(len(self._attention_layers), len(attention_mechanisms)))
self._attention_layer_size = sum(
tensor_shape.dimension_value(
layer.compute_output_shape([
None, cell.output_size +
tensor_shape.dimension_value(mechanism.values.shape[-1])
])[-1]) for layer, mechanism in zip(self._attention_layers,
attention_mechanisms))
else:
self._attention_layers = None
self._attention_layer_size = sum(
tensor_shape.dimension_value(attention_mechanism.values.shape[-1])
for attention_mechanism in attention_mechanisms)
if attention_fn is None:
attention_fn = _compute_attention
self._attention_fn = attention_fn
self._cell = cell
self._attention_mechanisms = attention_mechanisms
self._cell_input_fn = cell_input_fn
self._output_attention = output_attention
self._alignment_history = alignment_history
with ops.name_scope(name, "AttentionWrapperInit"):
if initial_cell_state is None:
self._initial_cell_state = None
else:
final_state_tensor = nest.flatten(initial_cell_state)[-1]
state_batch_size = (
tensor_shape.dimension_value(final_state_tensor.shape[0]) or
array_ops.shape(final_state_tensor)[0])
error_message = (
"When constructing AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and initial_cell_state. Are you using "
"the BeamSearchDecoder? You may need to tile your initial state "
"via the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
self._batch_size_checks(state_batch_size, error_message)):
self._initial_cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="check_initial_cell_state"),
initial_cell_state)
def _batch_size_checks(self, batch_size, error_message):
return [
check_ops.assert_equal(
batch_size, attention_mechanism.batch_size, message=error_message)
for attention_mechanism in self._attention_mechanisms
]
def _item_or_tuple(self, seq):
"""Returns `seq` as tuple or the singular element.
    Whether a tuple or the single element is returned is determined by how the
    AttentionMechanism(s) were passed to the constructor.
Args:
seq: A non-empty sequence of items or generator.
Returns:
Either the values in the sequence as a tuple if AttentionMechanism(s)
were passed to the constructor as a sequence or the singular element.
"""
t = tuple(seq)
if self._is_multi:
return t
else:
return t[0]
@property
def output_size(self):
if self._output_attention:
return self._attention_layer_size
else:
return self._cell.output_size
@property
def state_size(self):
"""The `state_size` property of `AttentionWrapper`.
Returns:
An `AttentionWrapperState` tuple containing shapes used by this object.
"""
return AttentionWrapperState(
cell_state=self._cell.state_size,
time=tensor_shape.TensorShape([]),
attention=self._attention_layer_size,
alignments=self._item_or_tuple(
a.alignments_size for a in self._attention_mechanisms),
attention_state=self._item_or_tuple(
a.state_size for a in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
a.alignments_size if self._alignment_history else ()
for a in self._attention_mechanisms)) # sometimes a TensorArray
def zero_state(self, batch_size, dtype):
"""Return an initial (zero) state tuple for this `AttentionWrapper`.
**NOTE** Please see the initializer documentation for details of how
to call `zero_state` if using an `AttentionWrapper` with a
`BeamSearchDecoder`.
Args:
batch_size: `0D` integer tensor: the batch size.
dtype: The internal state data type.
Returns:
An `AttentionWrapperState` tuple containing zeroed out tensors and,
possibly, empty `TensorArray` objects.
Raises:
ValueError: (or, possibly at runtime, InvalidArgument), if
`batch_size` does not match the output size of the encoder passed
to the wrapper object at initialization time.
"""
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.get_initial_state(
batch_size=batch_size, dtype=dtype)
error_message = (
"When calling zero_state of AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output has "
"been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
"the batch_size= argument passed to zero_state is "
"batch_size * beam_width.")
with ops.control_dependencies(
self._batch_size_checks(batch_size, error_message)):
cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="checked_cell_state"),
cell_state)
initial_alignments = [
attention_mechanism.initial_alignments(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms
]
return AttentionWrapperState(
cell_state=cell_state,
time=array_ops.zeros([], dtype=dtypes.int32),
attention=_zero_state_tensors(self._attention_layer_size, batch_size,
dtype),
alignments=self._item_or_tuple(initial_alignments),
attention_state=self._item_or_tuple(
attention_mechanism.initial_state(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
tensor_array_ops.TensorArray(
dtype,
size=0,
dynamic_size=True,
element_shape=alignment.shape) if self._alignment_history else
() for alignment in initial_alignments))
def call(self, inputs, state):
"""Perform a step of attention-wrapped RNN.
- Step 1: Mix the `inputs` and previous step's `attention` output via
`cell_input_fn`.
- Step 2: Call the wrapped `cell` with this input and its previous state.
- Step 3: Score the cell's output with `attention_mechanism`.
- Step 4: Calculate the alignments by passing the score through the
`normalizer`.
- Step 5: Calculate the context vector as the inner product between the
alignments and the attention_mechanism's values (memory).
- Step 6: Calculate the attention output by concatenating the cell output
and context through the attention layer (a linear layer with
`attention_layer_size` outputs).
Args:
inputs: (Possibly nested tuple of) Tensor, the input at this time step.
state: An instance of `AttentionWrapperState` containing tensors from the
previous time step.
Returns:
A tuple `(attention_or_cell_output, next_state)`, where:
- `attention_or_cell_output` depending on `output_attention`.
- `next_state` is an instance of `AttentionWrapperState`
containing the state calculated at this time step.
Raises:
TypeError: If `state` is not an instance of `AttentionWrapperState`.
"""
if not isinstance(state, AttentionWrapperState):
raise TypeError("Expected state to be instance of AttentionWrapperState. "
"Received type %s instead." % type(state))
# Step 1: Calculate the true inputs to the cell based on the
# previous attention value.
cell_inputs = self._cell_input_fn(inputs, state.attention)
cell_state = state.cell_state
cell_output, next_cell_state = self._cell(cell_inputs, cell_state)
cell_batch_size = (
tensor_shape.dimension_value(cell_output.shape[0]) or
array_ops.shape(cell_output)[0])
error_message = (
"When applying AttentionWrapper %s: " % self.name +
"Non-matching batch sizes between the memory "
"(encoder output) and the query (decoder output). Are you using "
"the BeamSearchDecoder? You may need to tile your memory input via "
"the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
self._batch_size_checks(cell_batch_size, error_message)):
cell_output = array_ops.identity(cell_output, name="checked_cell_output")
if self._is_multi:
previous_attention_state = state.attention_state
previous_alignment_history = state.alignment_history
else:
previous_attention_state = [state.attention_state]
previous_alignment_history = [state.alignment_history]
all_alignments = []
all_attentions = []
all_attention_states = []
maybe_all_histories = []
for i, attention_mechanism in enumerate(self._attention_mechanisms):
attention, alignments, next_attention_state = self._attention_fn(
attention_mechanism, cell_output, previous_attention_state[i],
self._attention_layers[i] if self._attention_layers else None)
alignment_history = previous_alignment_history[i].write(
state.time, alignments) if self._alignment_history else ()
all_attention_states.append(next_attention_state)
all_alignments.append(alignments)
all_attentions.append(attention)
maybe_all_histories.append(alignment_history)
attention = array_ops.concat(all_attentions, 1)
next_state = AttentionWrapperState(
time=state.time + 1,
cell_state=next_cell_state,
attention=attention,
attention_state=self._item_or_tuple(all_attention_states),
alignments=self._item_or_tuple(all_alignments),
alignment_history=self._item_or_tuple(maybe_all_histories))
if self._output_attention:
return attention, next_state
else:
return cell_output, next_state
| {
"content_hash": "b69f5c7867fde8041f9139eeea109960",
"timestamp": "",
"source": "github",
"line_count": 2524,
"max_line_length": 118,
"avg_line_length": 41.83280507131537,
"alnum_prop": 0.6630424488095013,
"repo_name": "chemelnucfin/tensorflow",
"id": "0e19d1e320520f9ffe0017dadd1caf9c834e1054",
"size": "106275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4913"
},
{
"name": "Batchfile",
"bytes": "16146"
},
{
"name": "C",
"bytes": "825231"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "75313939"
},
{
"name": "CMake",
"bytes": "207856"
},
{
"name": "Dockerfile",
"bytes": "80130"
},
{
"name": "Go",
"bytes": "1670422"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "881711"
},
{
"name": "Jupyter Notebook",
"bytes": "1113647"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "853297"
},
{
"name": "Makefile",
"bytes": "109340"
},
{
"name": "Objective-C",
"bytes": "105235"
},
{
"name": "Objective-C++",
"bytes": "258793"
},
{
"name": "PHP",
"bytes": "38007"
},
{
"name": "Pascal",
"bytes": "3741"
},
{
"name": "Pawn",
"bytes": "14380"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "50825074"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4706"
},
{
"name": "Shell",
"bytes": "532610"
},
{
"name": "Smarty",
"bytes": "31460"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
} |
import pygame
import math
class Tree:
root = None
_next_id = -1
depth = -1
numChildren = -1
    positions = {} # maps node id -> [index, depth]
    def __init__(self,depth,numChildren):
        self.depth = depth
        self.numChildren = numChildren
        self.positions = {} # per-instance dict so separate trees don't share state
        self.root = self.buildTree(self.depth,self.getNextId())
        self.setIndexes(self.root,self.numChildren)
def getNextId(self):
self._next_id = self._next_id + 1
return self._next_id
# Recursively builds N-array Tree in preorder
def buildTree(self, depth, lastid):
children = {}
if depth > 0:
for i in range(0,self.numChildren):
thisid = self.getNextId()
children[thisid] = self.buildTree(depth-1,thisid)
self.positions[lastid] = [-1, self.depth-depth]
n = Node(lastid)
n.setChildren(children)
return n
# Recursively sets serial indexes into positions for tree with N children
# index = parentindex * numchildren - lateral position in tree
# split into two loops, one to set indexes of a node's children, the second to traverse the tree
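    # Hypothetical walk-through for numChildren=2: the root gets index 1, its
    # children get 1*2-1=1 and 1*2-0=2, the next row gets 1..4, so mapXY can
    # place each node at x = index * width / (numChildren**depth + 1) across its row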
def setIndexes(self,root,numChildren, parentindex=None):
if root:
if self.positions[root.ID][0] == -1 and self.positions[root.ID][1] == 0: # If this is the root of the tree
parentindex = 1
self.positions[root.ID][0] = parentindex
i = numChildren - 1
for ids,child in root.children.items():
index = parentindex * numChildren - i
self.positions[child.ID][0] = index
i = i -1
for ids,child in root.children.items():
self.setIndexes(child,numChildren,self.positions[child.ID][0])
#iterates over the positions dict and sets x and y for each node
def mapXY(self, root, numChildren,windowSizeX,windowSizeY):
for ids, positions in self.positions.items():
index = positions[0]
depth = positions[1]
x = index * windowSizeX / (numChildren ** depth +1)
y = depth * windowSizeY / self.depth
actingNode = root.getNode(ids)
actingNode.coordinates[0] = x
actingNode.coordinates[1] = y+10 #Added 10 so the drawing is not on the top edge of the window
def drawTree(self,root,windowSurface,color,linecolor):
if root:
pygame.draw.circle(windowSurface,color,(math.floor(root.coordinates[0]),math.floor(root.coordinates[1])),5,0)
for id,child in root.children.items():
pygame.draw.line(windowSurface,linecolor,(root.coordinates[0],root.coordinates[1]),(child.coordinates[0],child.coordinates[1]),2)
self.drawTree(child,windowSurface,color,linecolor)
class Node:
coordinates = None # (x,y) array
ID = None # Unique identifier
children = {} # Children dict
    def __init__(self,ID):
        self.pos = [-1,-1] # place holders
        self.coordinates = [-1,-1]
        self.ID = ID
        self.children = {} # per-instance dict; avoids mutating the shared class-level default
def setChildren(self, children):
self.children = children
def addChild(self, child):
if child:
self.children[child.ID] = child
def getChild(self, ID):
return self.children.get(ID)
def getNode(self, ID, root = None, default = None):
if root == None:
root = self
if root.ID == ID:
return root
elif root.children is not None:
for childID in root.children:
output = (root.getChild(childID)).getNode(ID)
if output is not None:
return output
return default
if __name__ == "__main__":
BLACK = (0, 0, 0)
WHITE = (255,255,255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
    windowSurface = pygame.display.set_mode((1000,600),0,32) #make 1000x600 window
windowSurface.fill(WHITE)
numChildren = 4
depth = 3
t = Tree(depth,numChildren)
t.mapXY(t.root,numChildren,1000,400)
t.drawTree(t.root,windowSurface,BLUE,RED)
pygame.display.update()
| {
"content_hash": "4ca916e016ab95936eb488f91b66f41e",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 133,
"avg_line_length": 31.238938053097346,
"alnum_prop": 0.6920679886685552,
"repo_name": "shrub77/avtomation-blog-code",
"id": "e42dda8c06ebfb4f68ae21b0a2167aab30949e4e",
"size": "3621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "n-array-trees/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3621"
}
],
"symlink_target": ""
} |
import json
import os
class Configuration(object):
def __init__(self, directory):
self.package = None
self.name = None
self.icon_name = None
self.version = None
self.numeric_version = None
self.orientation = None
self.permissions = []
self.include_pil = False
self.include_sqlite = False
self.layout = "internal"
self.source = False
self.expansion = False
self.targetsdk = 26
try:
with file(os.path.join(directory, ".android.json"), "r") as f:
d = json.load(f)
self.__dict__.update(d)
except:
pass
def save(self, directory):
with file(os.path.join(directory, ".android.json"), "w") as f:
json.dump(self.__dict__, f)
def set_version(config, value):
"""
Sets the version, and tries to set the numeric versions based on the
version number.
"""
config.version = value
try:
v = 0
for i in config.version.split('.'):
v *= 100
v += int(i)
config.numeric_version = str(v)
except:
pass
def configure(interface, directory):
renpy = os.path.exists("renpy")
config = Configuration(directory)
config.name = interface.input("""What is the full name of your application? This name will appear in the list of installed applications.""", config.name)
if config.icon_name is None:
config.icon_name = config.name
config.icon_name = interface.input("What is the short name of your application? This name will be used in the launcher, and for application shortcuts.", config.icon_name)
config.package = interface.input("""\
What is the name of the package?
This is usually of the form com.domain.program or com.domain.email.program. It
must only contain ASCII letters and dots.""", config.package)
version = interface.input("""\
What is the application's version?
This should be the human-readable version that you would present to a person.""", config.version)
set_version(config, version)
config.numeric_version = interface.input("""What is the version code?
This should be an integer number, and the value should increase between versions.""", config.numeric_version)
config.targetsdk = interface.input("""What is the targetsdk version of the app?
This should be an integer number, for example API 28 for targeting android Pie (9.0).""", config.targetsdk)
config.orientation = interface.choice("How would you like your application to be displayed?", [
("sensorLandscape", "In landscape mode."),
("sensorPortrait", "In portrait mode."),
], config.orientation)
config.expansion = interface.choice("Would you like to create an expansion APK?", [
(False, "No. Size limit of 50 MB on Google Play, but can be distributed through other store and sideloaded."),
(True, "Yes. 2 GB size limit, but won't work outside of Google Play.")
], config.expansion)
if not renpy:
config.layout = interface.choice("How is your application laid out?", [
("internal", "A single directory, that will be placed on device internal storage."),
("external", "A single directory, that will be placed on device external storage."),
("split", "Multiple directories that correspond to internal, external, and asset storage."),
], config.layout)
config.source = interface.yesno("Do you want to include the Python source code of your application in the archive? If you include it once, you'll need to include it always.", config.source)
permissions = " ".join(config.permissions)
permissions = interface.input("""\
What permissions should your application have? Possible permissions include:
INTERNET (network access), VIBRATE (vibration control).
Please enter a space-separated list of permissions.""", permissions)
config.permissions = permissions.split()
config.include_sqlite = interface.yesno("Do you want to include SQLite3 with your application?", config.include_sqlite)
config.include_pil = interface.yesno("Do you want to include the Python Imaging Library (PIL) with your application?", config.include_pil)
if renpy:
if not config.expansion:
internet = "INTERNET" in config.permissions
internet = interface.yesno("Do you want to allow the app to access the Internet?", internet)
else:
internet = False # included in template.
permissions = [ i for i in config.permissions if i not in [ "INTERNET" ] ]
if internet:
permissions.append("INTERNET")
config.permissions = permissions
config.save(directory)
def set_config(iface, directory, var, value):
config = Configuration(directory)
if var == "version":
set_version(config, value)
elif var == "permissions":
config.permissions = value.split()
elif hasattr(config, var):
setattr(config, var, value)
else:
iface.fail("Unknown configuration variable: {}".format(var))
config.save(directory)
| {
"content_hash": "eae4a1551a959d1ce30383f14c8b5891",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 197,
"avg_line_length": 34.51592356687898,
"alnum_prop": 0.6252076028787599,
"repo_name": "pymo/pymo",
"id": "00c026a0636ec4e47af42d826dbfa21d5e216f14",
"size": "5419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "android/pgs4a/buildlib/configure.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3425"
},
{
"name": "C",
"bytes": "581274"
},
{
"name": "C++",
"bytes": "151108"
},
{
"name": "Clarion",
"bytes": "2743"
},
{
"name": "Groff",
"bytes": "13374"
},
{
"name": "HTML",
"bytes": "240526"
},
{
"name": "Java",
"bytes": "153187"
},
{
"name": "Makefile",
"bytes": "144854"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "27673732"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "29384"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import argparse
import csv
import sys
from gzip import GzipFile
import numpy as np
from gensim.models.word2vec import Word2Vec
parser = argparse.ArgumentParser(description='Expansion.')
parser.add_argument('-d', default=1, nargs='?', type=float)
parser.add_argument('--w2v', required=True, type=argparse.FileType('rb'))
parser.add_argument('--predicted', type=argparse.FileType('rb'))
parser.add_argument('--gzip', default=False, action='store_true')
args = parser.parse_args()
def predictions(f):
i = 0
while True:
try:
for Y_hat in np.load(f):
yield Y_hat
i += 1
except OSError:
break
print('%d slices passed' % i, flush=True, file=sys.stderr)
w2v = Word2Vec.load_word2vec_format(args.w2v, binary=True, unicode_errors='ignore')
w2v.init_sims(replace=True)
print('Using %d word2vec dimensions from "%s".' % (w2v.layer1_size, args.w2v.name), flush=True, file=sys.stderr)
with args.predicted if not args.gzip else GzipFile(fileobj=args.predicted, mode='rb') as f:
Y_hat_all = predictions(f)
reader = csv.reader(sys.stdin, delimiter='\t', quoting=csv.QUOTE_NONE)
for i, (hyponym, hypernym, similarity) in enumerate(reader):
Y_hat = next(Y_hat_all)
distance = np.linalg.norm(w2v[hypernym] - Y_hat)
if distance < args.d:
print('\t'.join((hyponym, hypernym, similarity)))
| {
"content_hash": "f0dbf0978e86a1ea33c3453045ae2259",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 112,
"avg_line_length": 29.97872340425532,
"alnum_prop": 0.6628814762242725,
"repo_name": "dustalov/watlink",
"id": "39dafa416094e161e615985cf8dbf37f56b410ca",
"size": "1433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exp/threshold.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "8038"
},
{
"name": "Makefile",
"bytes": "4453"
},
{
"name": "Python",
"bytes": "22401"
},
{
"name": "Shell",
"bytes": "5554"
},
{
"name": "XSLT",
"bytes": "5303"
}
],
"symlink_target": ""
} |
"""
This tests the moist saturated hydrostatic balance, by setting up a vertical
slice with the appropriate initialisation procedure, before taking a few time
steps and ensuring that the resulting velocities are very small.
"""
from gusto import *
from firedrake import (PeriodicIntervalMesh, ExtrudedMesh, Constant, Function,
FunctionSpace, VectorFunctionSpace)
from os import path
from netCDF4 import Dataset
import pytest
def setup_saturated(dirname, recovered):
# set up grid and time stepping parameters
dt = 1.
tmax = 3.
deltax = 400.
L = 2000.
H = 10000.
nlayers = int(H/deltax)
ncolumns = int(L/deltax)
m = PeriodicIntervalMesh(ncolumns, L)
mesh = ExtrudedMesh(m, layers=nlayers, layer_height=H/nlayers)
# option to easily change between recovered and not if necessary
# default should be to use lowest order set of spaces
degree = 0 if recovered else 1
output = OutputParameters(dirname=dirname+'/saturated_balance', dumpfreq=1, dumplist=['u'])
parameters = CompressibleParameters()
diagnostic_fields = [Theta_e()]
state = State(mesh,
dt=dt,
output=output,
parameters=parameters,
diagnostic_fields=diagnostic_fields)
tracers = [WaterVapour(), CloudWater()]
if recovered:
u_transport_option = "vector_advection_form"
else:
u_transport_option = "vector_invariant_form"
eqns = CompressibleEulerEquations(
state, "CG", degree, u_transport_option=u_transport_option, active_tracers=tracers)
# Initial conditions
rho0 = state.fields("rho")
theta0 = state.fields("theta")
water_v0 = state.fields("vapour_mixing_ratio")
water_c0 = state.fields("cloud_liquid_mixing_ratio")
moisture = ['vapour_mixing_ratio', 'cloud_liquid_mixing_ratio']
# spaces
Vt = theta0.function_space()
# Isentropic background state
Tsurf = Constant(300.)
total_water = Constant(0.02)
theta_e = Function(Vt).interpolate(Tsurf)
water_t = Function(Vt).interpolate(total_water)
# Calculate hydrostatic exner
saturated_hydrostatic_balance(state, theta_e, water_t)
water_c0.assign(water_t - water_v0)
state.set_reference_profiles([('rho', rho0),
('theta', theta0)])
# Set up transport schemes
if recovered:
VDG1 = state.spaces("DG1_equispaced")
VCG1 = FunctionSpace(mesh, "CG", 1)
Vu_DG1 = VectorFunctionSpace(mesh, VDG1.ufl_element())
Vu_CG1 = VectorFunctionSpace(mesh, "CG", 1)
u_opts = RecoveryOptions(embedding_space=Vu_DG1,
recovered_space=Vu_CG1,
boundary_method=BoundaryMethod.taylor)
rho_opts = RecoveryOptions(embedding_space=VDG1,
recovered_space=VCG1,
boundary_method=BoundaryMethod.taylor)
theta_opts = RecoveryOptions(embedding_space=VDG1,
recovered_space=VCG1)
wv_opts = RecoveryOptions(embedding_space=VDG1,
recovered_space=VCG1)
wc_opts = RecoveryOptions(embedding_space=VDG1,
recovered_space=VCG1)
else:
rho_opts = None
theta_opts = EmbeddedDGOptions()
wv_opts = EmbeddedDGOptions()
wc_opts = EmbeddedDGOptions()
transported_fields = [SSPRK3(state, 'rho', options=rho_opts),
SSPRK3(state, 'theta', options=theta_opts),
SSPRK3(state, 'vapour_mixing_ratio', options=wv_opts),
SSPRK3(state, 'cloud_liquid_mixing_ratio', options=wc_opts)]
if recovered:
transported_fields.append(SSPRK3(state, 'u', options=u_opts))
else:
transported_fields.append(ImplicitMidpoint(state, 'u'))
linear_solver = CompressibleSolver(state, eqns, moisture=moisture)
# add physics
physics_list = [Condensation(state)]
# build time stepper
stepper = SemiImplicitQuasiNewton(eqns, state, transported_fields,
linear_solver=linear_solver,
physics_list=physics_list)
return stepper, tmax
def run_saturated(dirname, recovered):
stepper, tmax = setup_saturated(dirname, recovered)
stepper.run(t=0, tmax=tmax)
@pytest.mark.parametrize("recovered", [True, False])
def test_saturated_setup(tmpdir, recovered):
dirname = str(tmpdir)
run_saturated(dirname, recovered)
filename = path.join(dirname, "saturated_balance/diagnostics.nc")
data = Dataset(filename, "r")
u = data.groups['u']
umax = u.variables['max']
assert umax[-1] < 1e-5
| {
"content_hash": "77cc119ce4a04cab9f22377272f905c5",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 95,
"avg_line_length": 34.156028368794324,
"alnum_prop": 0.6214700996677741,
"repo_name": "firedrakeproject/gusto",
"id": "fa61406f5bc50e7ef67fbcddcd4378fa44f9d0eb",
"size": "4816",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "integration-tests/balance/test_saturated_balance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "765"
},
{
"name": "Python",
"bytes": "567084"
}
],
"symlink_target": ""
} |
import datetime
import functools
import os
import re
import urllib
from flask import (Flask, abort, flash, Markup, redirect, render_template,
request, Response, session, url_for)
from markdown import markdown
from markdown.extensions.codehilite import CodeHiliteExtension
from markdown.extensions.extra import ExtraExtension
from micawber import bootstrap_basic, parse_html
from micawber.cache import Cache as OEmbedCache
from peewee import *
from playhouse.flask_utils import FlaskDB, get_object_or_404, object_list
from playhouse.sqlite_ext import *
ADMIN_PASSWORD = 'secret'
APP_DIR = os.path.dirname(os.path.realpath(__file__))
DATABASE = 'sqliteext:///%s' % os.path.join(APP_DIR, 'blog.db')
DEBUG = False
SECRET_KEY = 'shhh, secret!' # Used by Flask to encrypt session cookie.
SITE_WIDTH = 800
app = Flask(__name__)
app.config.from_object(__name__)
flask_db = FlaskDB(app)
database = flask_db.database
oembed_providers = bootstrap_basic(OEmbedCache())
# database structure code
class Entry(flask_db.Model):
title = CharField()
slug = CharField(unique=True)
content = TextField()
published = BooleanField(index=True)
timestamp = DateTimeField(default=datetime.datetime.now, index=True)
@property
def html_content(self):
hilite = CodeHiliteExtension(linenums=False, css_class='highlight')
extras = ExtraExtension()
markdown_content = markdown(self.content, extensions=[hilite, extras])
oembed_content = parse_html(
markdown_content,
oembed_providers,
urlize_all=True,
maxwidth=app.config['SITE_WIDTH'])
return Markup(oembed_content)
def save(self, *args, **kwargs):
if not self.slug:
            self.slug = re.sub(r'[^\w]+', '-', self.title.lower())
ret = super(Entry, self).save(*args, **kwargs)
# Store search content.
self.update_search_index()
return ret
def update_search_index(self):
try:
fts_entry = FTSEntry.get(FTSEntry.entry_id == self.id)
except FTSEntry.DoesNotExist:
fts_entry = FTSEntry(entry_id=self.id)
force_insert = True
else:
force_insert = False
fts_entry.content = '\n'.join((self.title, self.content))
fts_entry.save(force_insert=force_insert)
@classmethod
def public(cls):
return Entry.select().where(Entry.published == True)
@classmethod
def drafts(cls):
return Entry.select().where(Entry.published == False)
@classmethod
def search(cls, query):
words = [word.strip() for word in query.split() if word.strip()]
if not words:
# Return empty query.
return Entry.select().where(Entry.id == 0)
else:
search = ' '.join(words)
return (FTSEntry
.select(
FTSEntry,
Entry,
FTSEntry.rank().alias('score'))
.join(Entry, on=(FTSEntry.entry_id == Entry.id).alias('entry'))
.where(
(Entry.published == True) &
(FTSEntry.match(search)))
.order_by(SQL('score').desc()))
class FTSEntry(FTSModel):
entry_id = IntegerField()
content = TextField()
class Meta:
database = database
def login_required(fn):
@functools.wraps(fn)
def inner(*args, **kwargs):
if session.get('logged_in'):
return fn(*args, **kwargs)
return redirect(url_for('login', next=request.path))
return inner
@app.route('/login/', methods=['GET', 'POST'])
def login():
next_url = request.args.get('next') or request.form.get('next')
if request.method == 'POST' and request.form.get('password'):
password = request.form.get('password')
if password == app.config['ADMIN_PASSWORD']:
session['logged_in'] = True
session.permanent = True # Use cookie to store session.
flash('You are now logged in.', 'success')
return redirect(next_url or url_for('index'))
else:
flash('Incorrect password.', 'danger')
return render_template('login.html', next_url=next_url)
@app.route('/logout/', methods=['GET', 'POST'])
def logout():
if request.method == 'POST':
session.clear()
return redirect(url_for('login'))
return render_template('logout.html')
@app.route('/')
def index():
search_query = request.args.get('q')
if search_query:
query = Entry.search(search_query)
else:
query = Entry.public().order_by(Entry.timestamp.desc())
return object_list('index.html', query, search=search_query)
def _create_or_edit(entry, template):
if request.method == 'POST':
entry.title = request.form.get('title') or ''
entry.content = request.form.get('content') or ''
entry.published = request.form.get('published') or False
if not (entry.title and entry.content):
flash('Title and Content are required.', 'danger')
else:
# Wrap the call to save in a transaction so we can roll it back
# cleanly in the event of an integrity error.
try:
with database.atomic():
entry.save()
except IntegrityError:
flash('Error: this title is already in use.', 'danger')
else:
flash('Entry saved successfully.', 'success')
if entry.published:
return redirect(url_for('detail', slug=entry.slug))
else:
return redirect(url_for('edit', slug=entry.slug))
return render_template(template, entry=entry)
@app.route('/create/', methods=['GET', 'POST'])
@login_required
def create():
return _create_or_edit(Entry(title='', content=''), 'create.html')
@app.route('/drafts/')
@login_required
def drafts():
query = Entry.drafts().order_by(Entry.timestamp.desc())
return object_list('index.html', query, check_bounds=False)
@app.route('/<slug>/')
def detail(slug):
if session.get('logged_in'):
query = Entry.select()
else:
query = Entry.public()
entry = get_object_or_404(query, Entry.slug == slug)
return render_template('detail.html', entry=entry)
@app.route('/<slug>/edit/', methods=['GET', 'POST'])
@login_required
def edit(slug):
entry = get_object_or_404(Entry, Entry.slug == slug)
return _create_or_edit(entry, 'edit.html')
@app.template_filter('clean_querystring')
def clean_querystring(request_args, *keys_to_remove, **new_values):
querystring = dict((key, value) for key, value in request_args.items())
for key in keys_to_remove:
querystring.pop(key, None)
querystring.update(new_values)
return urllib.urlencode(querystring)
@app.errorhandler(404)
def not_found(exc):
return Response('<h3>Still not working!!!</h3>'), 404
def main():
database.create_tables([Entry, FTSEntry], safe=True)
app.run(debug=True)
if __name__ == '__main__':
main()
| {
"content_hash": "e0d9150b51e323923a0d56f869fa0beb",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 79,
"avg_line_length": 33.055813953488375,
"alnum_prop": 0.6134796679330238,
"repo_name": "artopping/nyu-python",
"id": "d9f4adb8b75d4925ec6674a5d4be3ebaf9000bc4",
"size": "7205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "course3/assignments/app/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "77"
},
{
"name": "CSS",
"bytes": "4488"
},
{
"name": "HTML",
"bytes": "24199"
},
{
"name": "Python",
"bytes": "180205"
},
{
"name": "Shell",
"bytes": "31730"
},
{
"name": "Vim script",
"bytes": "170"
}
],
"symlink_target": ""
} |
from lino.api import dd, rt, _
from lino_xl.lib.ledger.ui import (
PartnerVouchers, ByJournal, PrintableByJournal,
Movements, MovementsByVoucher)
from lino_xl.lib.ledger.choicelists import TradeTypes, VoucherTypes
from lino_xl.lib.ledger.roles import LedgerUser, LedgerStaff
from lino_xl.lib.ledger.mixins import ItemsByVoucher, VouchersByPartnerBase
from .choicelists import VatRegimes, VatAreas
from .mixins import VatDeclaration, VatDocument, VatVoucher
class InvoiceDetail(dd.DetailLayout):
main = "general ledger"
totals = """
total_base
total_vat
total_incl
workflow_buttons
"""
general = dd.Panel("""
entry_date number partner user
payment_term due_date your_ref vat_regime #item_vat
ItemsByInvoice
uploads.UploadsByController:60 totals:20
""", label=_("General"))
ledger = dd.Panel("""
journal accounting_period id narration
vat.MovementsByVoucher
""", label=_("Ledger"))
class Invoices(PartnerVouchers):
required_roles = dd.login_required(LedgerUser)
model = 'vat.VatAccountInvoice'
order_by = ["-id"]
column_names = "entry_date id number_with_year partner total_incl user *"
detail_layout = InvoiceDetail()
insert_layout = """
journal partner
entry_date total_incl
"""
# start_at_bottom = True
class InvoicesByJournal(ByJournal, Invoices):
    # ByJournal must be before Invoices to get the right order_by
params_layout = "partner state start_period end_period user"
column_names = "number_with_year entry_date due_date " \
"your_ref partner " \
"total_incl " \
"total_base total_vat user workflow_buttons *"
#~ "ledger_remark:10 " \
insert_layout = """
partner
entry_date total_incl
"""
class PrintableInvoicesByJournal(PrintableByJournal, Invoices):
label = _("Purchase journal")
VoucherTypes.add_item_lazy(InvoicesByJournal)
class ItemsByInvoice(ItemsByVoucher):
model = 'vat.InvoiceItem'
display_mode = 'grid'
column_names = "account title vat_class total_incl total_base total_vat"
class VouchersByPartner(VouchersByPartnerBase):
label = _("VAT vouchers")
column_names = "entry_date voucher total_incl total_base total_vat"
_voucher_base = VatDocument
@dd.virtualfield('vat.VatAccountInvoice.total_incl')
def total_incl(self, row, ar):
return row.total_incl
@dd.virtualfield('vat.VatAccountInvoice.total_base')
def total_base(self, row, ar):
return row.total_base
@dd.virtualfield('vat.VatAccountInvoice.total_vat')
def total_vat(self, row, ar):
return row.total_vat
class MovementsByVoucher(MovementsByVoucher):
column_names = 'account project partner debit credit vat_class match_link cleared *'
class VatInvoices(PartnerVouchers):
abstract = True
_trade_type = None
editable = False
model = VatVoucher
column_names = 'detail_link partner partner_vat_id vat_regime total_base total_vat total_incl'
# order_by = ['entry_date', 'partner']
# order_by = ['entry_date', 'id']
# order_by = ['entry_date', 'number']
order_by = ['accounting_period', 'number']
hidden_elements = frozenset(
"""entry_date journal__trade_type journal number
journal__trade_type state user""".split())
parameters = dict(
intracom=dd.YesNo.field(_("Show intracom vouchers"), blank=True),
**PartnerVouchers.parameters)
params_layout = "partner project start_period end_period cleared intracom"
intracom_regimes = set([
r for r in VatRegimes.get_list_items() if r.vat_area == VatAreas.eu])
@classmethod
def get_request_queryset(cls, ar, **kwargs):
assert not kwargs
fkw = dict()
if cls._trade_type is not None:
fkw.update(journal__trade_type=cls._trade_type)
if ar.param_values.intracom == dd.YesNo.yes:
fkw.update(vat_regime__in=cls.intracom_regimes)
# note that we cannot use qs.filter() because this table is on an abstract model
return super(VatInvoices, cls).get_request_queryset(ar, **fkw)
# raise Exception("20170905 {}".format(qs.query))
@dd.virtualfield(dd.ForeignKey('contacts.Partner'))
def partner(cls, obj, ar=None):
return obj.partner
@dd.virtualfield('contacts.Partner.vat_id')
def partner_vat_id(cls, obj, ar=None):
return obj.partner.vat_id
dd.update_field(VatInvoices, 'detail_link', verbose_name=_("Invoice"))
class ByDeclaration(dd.Table):
abstract = True
@classmethod
def param_defaults(self, ar, **kw):
kw = super(ByDeclaration, self).param_defaults(ar, **kw)
mi = ar.master_instance
if mi is not None:
kw.update(start_period=mi.start_period, end_period=mi.end_period)
# print("20191205", kw)
return kw
class MovementsByDeclaration(ByDeclaration, Movements):
label = _("Declared movements")
master = VatDeclaration
# exclude = dict(vat_class="")
column_names = "value_date voucher_link description debit credit account__vat_column vat_class vat_regime *"
class SalesByDeclaration(ByDeclaration, VatInvoices):
_trade_type = TradeTypes.sales
label = _("VAT sales")
master = VatDeclaration
class PurchasesByDeclaration(ByDeclaration, VatInvoices):
_trade_type = TradeTypes.purchases
label = _("VAT purchases")
master = VatDeclaration
class IntracomInvoices(VatInvoices):
abstract = True
@classmethod
def param_defaults(self, ar, **kw):
kw = super(IntracomInvoices, self).param_defaults(ar, **kw)
kw.update(intracom=dd.YesNo.yes)
return kw
class IntracomSales(IntracomInvoices):
_trade_type = TradeTypes.sales
label = _("Intra-Community sales")
# model = "sales.VatProductInvoice"
class IntracomPurchases(IntracomInvoices):
_trade_type = TradeTypes.purchases
label = _("Intra-Community purchases")
| {
"content_hash": "272a5a05368068116150b2a4cf1ee24c",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 112,
"avg_line_length": 31.49473684210526,
"alnum_prop": 0.6789772727272727,
"repo_name": "lino-framework/xl",
"id": "4fe768d0521a64e67aa8231ee8b103ee1be35330",
"size": "6101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lino_xl/lib/vat/desktop.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "186625"
},
{
"name": "HTML",
"bytes": "1417287"
},
{
"name": "JavaScript",
"bytes": "1630929"
},
{
"name": "PHP",
"bytes": "40437"
},
{
"name": "Python",
"bytes": "2395471"
}
],
"symlink_target": ""
} |
from flask import render_template, session, redirect, url_for, request, current_app
from . import main
from flask.ext.login import login_required
@main.route('/')
def index():
return render_template('main/index.html')
| {
"content_hash": "684735238f0b118b16ad1501ff14d023",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 83,
"avg_line_length": 17.615384615384617,
"alnum_prop": 0.7292576419213974,
"repo_name": "youqingkui/wx.youqingkui.me",
"id": "f5df7cc24bb9b12d893a8a0ba05273d33d9a785c",
"size": "266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/main/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2492"
},
{
"name": "Python",
"bytes": "10762"
},
{
"name": "Shell",
"bytes": "126"
}
],
"symlink_target": ""
} |
import sys, os
import zipfile
from lxml import etree
def get_epub_info(fname, forceMime = False):
ns = {
'n':'urn:oasis:names:tc:opendocument:xmlns:container',
'pkg':'http://www.idpf.org/2007/opf',
'dc':'http://purl.org/dc/elements/1.1/'
}
# prepare to read from the .epub file
zip = zipfile.ZipFile(fname)
mime = zip.read('mimetype')
if mime != "application/epub+zip\n" and not forceMime:
print mime
return None
# find the contents metafile
txt = zip.read('META-INF/container.xml')
tree = etree.fromstring(txt)
cfname = tree.xpath('n:rootfiles/n:rootfile/@full-path',namespaces=ns)[0]
# grab the metadata block from the contents metafile
cf = zip.read(cfname)
tree = etree.fromstring(cf)
px = tree.xpath('/pkg:package/pkg:metadata',namespaces=ns)
p = px[0]
# repackage the data
res = {}
for s in ['title','language','creator','date','identifier']:
r = p.xpath('dc:%s/text()'%(s),namespaces=ns)
if len(r) > 0 :
v = r[0]
else :
v = ""
res[s] = v
return res
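# Illustrative sketch (not part of the original script): the dict returned by
# get_epub_info() carries the five Dublin Core fields queried above; missing
# fields come back as empty strings. The file name below is a placeholder.
#
#   info = get_epub_info("book.epub")
#   # e.g. {'title': 'Some Title', 'language': 'en', 'creator': 'Some Author',
#   #       'date': '2012-01-01', 'identifier': 'urn:isbn:9780000000000'}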
for fnam in sys.argv[1:]:
r = get_epub_info(fnam)
print(r)
#os.rename(fnam, "%s - %s.pub" % (r['title'], r['creator']))
| {
"content_hash": "fea17391a947b8e6b376dc8c6434a85a",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 77,
"avg_line_length": 26.75,
"alnum_prop": 0.5747663551401869,
"repo_name": "pzia/keepmydatas",
"id": "b69c8cfab062b3924f9e39fc370a5c93464c7224",
"size": "1284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/rename-epub.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94188"
}
],
"symlink_target": ""
} |
""" AutoProcessor class."""
import importlib
import inspect
import json
from collections import OrderedDict
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module
from ...feature_extraction_utils import FeatureExtractionMixin
from ...image_processing_utils import ImageProcessingMixin
from ...tokenization_utils import TOKENIZER_CONFIG_FILE
from ...utils import FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
from .feature_extraction_auto import AutoFeatureExtractor
from .image_processing_auto import AutoImageProcessor
from .tokenization_auto import AutoTokenizer
logger = logging.get_logger(__name__)
PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("clip", "CLIPProcessor"),
("clipseg", "CLIPSegProcessor"),
("flava", "FlavaProcessor"),
("groupvit", "CLIPProcessor"),
("layoutlmv2", "LayoutLMv2Processor"),
("layoutlmv3", "LayoutLMv3Processor"),
("layoutxlm", "LayoutXLMProcessor"),
("markuplm", "MarkupLMProcessor"),
("owlvit", "OwlViTProcessor"),
("sew", "Wav2Vec2Processor"),
("sew-d", "Wav2Vec2Processor"),
("speech_to_text", "Speech2TextProcessor"),
("speech_to_text_2", "Speech2Text2Processor"),
("trocr", "TrOCRProcessor"),
("unispeech", "Wav2Vec2Processor"),
("unispeech-sat", "Wav2Vec2Processor"),
("vilt", "ViltProcessor"),
("vision-text-dual-encoder", "VisionTextDualEncoderProcessor"),
("wav2vec2", "Wav2Vec2Processor"),
("wav2vec2-conformer", "Wav2Vec2Processor"),
("wav2vec2_with_lm", "Wav2Vec2ProcessorWithLM"),
("wavlm", "Wav2Vec2Processor"),
("whisper", "WhisperProcessor"),
("xclip", "XCLIPProcessor"),
]
)
PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, PROCESSOR_MAPPING_NAMES)
def processor_class_from_name(class_name: str):
for module_name, processors in PROCESSOR_MAPPING_NAMES.items():
if class_name in processors:
module_name = model_type_to_module_name(module_name)
module = importlib.import_module(f".{module_name}", "transformers.models")
try:
return getattr(module, class_name)
except AttributeError:
continue
for processor in PROCESSOR_MAPPING._extra_content.values():
if getattr(processor, "__name__", None) == class_name:
return processor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
main_module = importlib.import_module("transformers")
if hasattr(main_module, class_name):
return getattr(main_module, class_name)
return None
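# Illustrative example (not part of the library source): resolving a processor
# class from its name string, e.g.
#
#   processor_class_from_name("CLIPProcessor")
#
# returns the CLIPProcessor class when its dependencies are installed, and
# None for an unknown name.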
class AutoProcessor:
r"""
This is a generic processor class that will be instantiated as one of the processor classes of the library when
created with the [`AutoProcessor.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoProcessor is designed to be instantiated "
"using the `AutoProcessor.from_pretrained(pretrained_model_name_or_path)` method."
)
@classmethod
@replace_list_option_in_docstrings(PROCESSOR_MAPPING_NAMES)
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
r"""
Instantiate one of the processor classes of the library from a pretrained model vocabulary.
The processor class to instantiate is selected based on the `model_type` property of the config object (either
passed as an argument or loaded from `pretrained_model_name_or_path` if possible):
List options
Params:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or
namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.
- a path to a *directory* containing a processor files saved using the `save_pretrained()` method,
e.g., `./my_model_directory/`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the feature extractor files and override the cached versions
if they exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received file. Attempts to resume the download if such a file
exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `huggingface-cli login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final feature extractor object. If `True`, then this
functions returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
`kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (`Dict[str, Any]`, *optional*):
The values in kwargs of any keys which are feature extractor attributes will be used to override the
loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
controlled by the `return_unused_kwargs` keyword parameter.
<Tip>
Passing `use_auth_token=True` is required when you want to use a private model.
</Tip>
Examples:
```python
>>> from transformers import AutoProcessor
>>> # Download processor from huggingface.co and cache.
>>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
>>> # If processor files are in a directory (e.g. processor was saved using *save_pretrained('./test/saved_model/')*)
>>> processor = AutoProcessor.from_pretrained("./test/saved_model/")
```"""
config = kwargs.pop("config", None)
trust_remote_code = kwargs.pop("trust_remote_code", False)
kwargs["_from_auto"] = True
processor_class = None
processor_auto_map = None
# First, let's see if we have a preprocessor config.
# Filter the kwargs for `get_file_from_repo`.
get_file_from_repo_kwargs = {
key: kwargs[key] for key in inspect.signature(get_file_from_repo).parameters.keys() if key in kwargs
}
# Let's start by checking whether the processor class is saved in an image processor
preprocessor_config_file = get_file_from_repo(
pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, **get_file_from_repo_kwargs
)
if preprocessor_config_file is not None:
config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
processor_class = config_dict.get("processor_class", None)
if "AutoProcessor" in config_dict.get("auto_map", {}):
processor_auto_map = config_dict["auto_map"]["AutoProcessor"]
# If not found, let's check whether the processor class is saved in a feature extractor config
if preprocessor_config_file is not None and processor_class is None:
config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
processor_class = config_dict.get("processor_class", None)
if "AutoProcessor" in config_dict.get("auto_map", {}):
processor_auto_map = config_dict["auto_map"]["AutoProcessor"]
if processor_class is None:
# Next, let's check whether the processor class is saved in a tokenizer
tokenizer_config_file = get_file_from_repo(
pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, **get_file_from_repo_kwargs
)
if tokenizer_config_file is not None:
with open(tokenizer_config_file, encoding="utf-8") as reader:
config_dict = json.load(reader)
processor_class = config_dict.get("processor_class", None)
if "AutoProcessor" in config_dict.get("auto_map", {}):
processor_auto_map = config_dict["auto_map"]["AutoProcessor"]
if processor_class is None:
# Otherwise, load config, if it can be loaded.
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(
pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
)
# And check if the config contains the processor class.
processor_class = getattr(config, "processor_class", None)
if hasattr(config, "auto_map") and "AutoProcessor" in config.auto_map:
processor_auto_map = config.auto_map["AutoProcessor"]
if processor_class is not None:
# If we have custom code for a feature extractor, we get the proper class.
if processor_auto_map is not None:
if not trust_remote_code:
raise ValueError(
f"Loading {pretrained_model_name_or_path} requires you to execute the feature extractor file "
"in that repo on your local machine. Make sure you have read the code there to avoid "
"malicious use, then set the option `trust_remote_code=True` to remove this error."
)
if kwargs.get("revision", None) is None:
logger.warning(
"Explicitly passing a `revision` is encouraged when loading a feature extractor with custom "
"code to ensure no malicious code has been contributed in a newer revision."
)
module_file, class_name = processor_auto_map.split(".")
processor_class = get_class_from_dynamic_module(
pretrained_model_name_or_path, module_file + ".py", class_name, **kwargs
)
else:
processor_class = processor_class_from_name(processor_class)
return processor_class.from_pretrained(
pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
)
# Last try: we use the PROCESSOR_MAPPING.
if type(config) in PROCESSOR_MAPPING:
return PROCESSOR_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, **kwargs)
# At this stage, there doesn't seem to be a `Processor` class available for this model, so let's try a
# tokenizer.
try:
return AutoTokenizer.from_pretrained(
pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
)
except Exception:
try:
return AutoImageProcessor.from_pretrained(
pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
)
except Exception:
pass
try:
return AutoFeatureExtractor.from_pretrained(
pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
)
except Exception:
pass
raise ValueError(
f"Unrecognized processing class in {pretrained_model_name_or_path}. Can't instantiate a processor, a "
"tokenizer or a feature extractor for this model. Make sure the repository contains the files of at least "
"one of those processing classes."
)
@staticmethod
def register(config_class, processor_class):
"""
Register a new processor for this class.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
processor_class ([`FeatureExtractorMixin`]): The processor to register.
"""
PROCESSOR_MAPPING.register(config_class, processor_class)
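# Usage sketch (illustrative, not part of the library source): registering a
# custom processor so that AutoProcessor can resolve it. CustomConfig and
# CustomProcessor are hypothetical user-defined classes.
#
#   AutoConfig.register("custom-model", CustomConfig)
#   AutoProcessor.register(CustomConfig, CustomProcessor)
#   processor = AutoProcessor.from_pretrained("path/to/custom-model-checkpoint")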
| {
"content_hash": "f37ac5428ce19ffd0690a2ee818f9096",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 125,
"avg_line_length": 49.311418685121104,
"alnum_prop": 0.628657638060487,
"repo_name": "huggingface/transformers",
"id": "06d44ab33e7640942a16330d119ac748dd8c12e4",
"size": "14856",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/transformers/models/auto/processing_auto.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6021"
},
{
"name": "C++",
"bytes": "12959"
},
{
"name": "Cuda",
"bytes": "175419"
},
{
"name": "Dockerfile",
"bytes": "18218"
},
{
"name": "Jsonnet",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "3430"
},
{
"name": "Python",
"bytes": "35742012"
},
{
"name": "Shell",
"bytes": "30374"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._virtual_machine_extensions_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_update_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineExtensionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2017_03_30.aio.ComputeManagementClient`'s
:attr:`virtual_machine_extensions` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
async def _create_or_update_initial(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
extension_parameters: Union[_models.VirtualMachineExtension, IO],
**kwargs: Any
) -> _models.VirtualMachineExtension:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.VirtualMachineExtension]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(extension_parameters, (IO, bytes)):
_content = extension_parameters
else:
_json = self._serialize.body(extension_parameters, "VirtualMachineExtension")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"} # type: ignore
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
extension_parameters: _models.VirtualMachineExtension,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.VirtualMachineExtension]:
"""The operation to create or update the extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be created or
updated. Required.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension. Required.
:type vm_extension_name: str
:param extension_parameters: Parameters supplied to the Create Virtual Machine Extension
operation. Required.
:type extension_parameters: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtension
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineExtension or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtension]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
extension_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.VirtualMachineExtension]:
"""The operation to create or update the extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be created or
updated. Required.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension. Required.
:type vm_extension_name: str
:param extension_parameters: Parameters supplied to the Create Virtual Machine Extension
operation. Required.
:type extension_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineExtension or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtension]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
extension_parameters: Union[_models.VirtualMachineExtension, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.VirtualMachineExtension]:
"""The operation to create or update the extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be created or
updated. Required.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension. Required.
:type vm_extension_name: str
:param extension_parameters: Parameters supplied to the Create Virtual Machine Extension
        operation. Is either a model type or an IO type. Required.
:type extension_parameters: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtension or
IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineExtension or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtension]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.VirtualMachineExtension]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial( # type: ignore
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
extension_parameters=extension_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"} # type: ignore
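    # Illustrative usage sketch (not part of the generated client); the resource
    # names and the `client` variable are placeholders:
    #
    #   poller = await client.virtual_machine_extensions.begin_create_or_update(
    #       "my-rg", "my-vm", "my-extension", extension_parameters
    #   )
    #   extension = await poller.result()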
async def _update_initial(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
extension_parameters: Union[_models.VirtualMachineExtensionUpdate, IO],
**kwargs: Any
) -> _models.VirtualMachineExtension:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.VirtualMachineExtension]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(extension_parameters, (IO, bytes)):
_content = extension_parameters
else:
_json = self._serialize.body(extension_parameters, "VirtualMachineExtensionUpdate")
request = build_update_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"} # type: ignore
@overload
async def begin_update(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
extension_parameters: _models.VirtualMachineExtensionUpdate,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.VirtualMachineExtension]:
"""The operation to update the extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be updated.
Required.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension. Required.
:type vm_extension_name: str
:param extension_parameters: Parameters supplied to the Update Virtual Machine Extension
operation. Required.
:type extension_parameters:
~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtensionUpdate
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineExtension or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtension]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_update(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
extension_parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.VirtualMachineExtension]:
"""The operation to update the extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be updated.
Required.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension. Required.
:type vm_extension_name: str
:param extension_parameters: Parameters supplied to the Update Virtual Machine Extension
operation. Required.
:type extension_parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineExtension or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtension]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
extension_parameters: Union[_models.VirtualMachineExtensionUpdate, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.VirtualMachineExtension]:
"""The operation to update the extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be updated.
Required.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension. Required.
:type vm_extension_name: str
:param extension_parameters: Parameters supplied to the Update Virtual Machine Extension
        operation. Is either a model type or an IO type. Required.
:type extension_parameters:
~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtensionUpdate or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualMachineExtension or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtension]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.VirtualMachineExtension]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial( # type: ignore
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
extension_parameters=extension_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"} # type: ignore
async def _delete_initial(
self, resource_group_name: str, vm_name: str, vm_extension_name: str, **kwargs: Any
) -> Optional[_models.OperationStatusResponse]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.OperationStatusResponse]]
request = build_delete_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("OperationStatusResponse", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"} # type: ignore
@distributed_trace_async
async def begin_delete(
self, resource_group_name: str, vm_name: str, vm_extension_name: str, **kwargs: Any
) -> AsyncLROPoller[_models.OperationStatusResponse]:
"""The operation to delete the extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be deleted.
Required.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension. Required.
:type vm_extension_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.OperationStatusResponse]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("OperationStatusResponse", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
vm_name: str,
vm_extension_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> _models.VirtualMachineExtension:
"""The operation to get the extension.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param vm_name: The name of the virtual machine containing the extension. Required.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension. Required.
:type vm_extension_name: str
:param expand: The expand expression to apply on the operation. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineExtension or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachineExtension
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2017-03-30")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.VirtualMachineExtension]
request = build_get_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
subscription_id=self._config.subscription_id,
expand=expand,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("VirtualMachineExtension", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"} # type: ignore
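    # Illustrative usage sketch (placeholders, not part of the generated client):
    #
    #   extension = await client.virtual_machine_extensions.get(
    #       "my-rg", "my-vm", "my-extension", expand="instanceView"
    #   )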
| {
"content_hash": "7b7641d31d06c6bcb3baa2d8017e2f52",
"timestamp": "",
"source": "github",
"line_count": 716,
"max_line_length": 216,
"avg_line_length": 48.12709497206704,
"alnum_prop": 0.6561420818944252,
"repo_name": "Azure/azure-sdk-for-python",
"id": "75ae364e9246fd2c8933f406a0ec44f6b299ad8f",
"size": "34959",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/aio/operations/_virtual_machine_extensions_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""This module implements enough functionality to program the STM32F4xx over
DFU, without requiring dfu-util.
See app note AN3156 for a description of the DFU protocol.
See document UM0391 for a description of the DFuse file.
"""
from __future__ import print_function
import argparse
import collections
import inspect
import re
import struct
import sys
import usb.core
import usb.util
import zlib
# USB request timeout, in milliseconds
__TIMEOUT = 4000
# DFU commands
__DFU_DETACH = 0
__DFU_DNLOAD = 1
__DFU_UPLOAD = 2
__DFU_GETSTATUS = 3
__DFU_CLRSTATUS = 4
__DFU_GETSTATE = 5
__DFU_ABORT = 6
# DFU status
__DFU_STATE_APP_IDLE = 0x00
__DFU_STATE_APP_DETACH = 0x01
__DFU_STATE_DFU_IDLE = 0x02
__DFU_STATE_DFU_DOWNLOAD_SYNC = 0x03
__DFU_STATE_DFU_DOWNLOAD_BUSY = 0x04
__DFU_STATE_DFU_DOWNLOAD_IDLE = 0x05
__DFU_STATE_DFU_MANIFEST_SYNC = 0x06
__DFU_STATE_DFU_MANIFEST = 0x07
__DFU_STATE_DFU_MANIFEST_WAIT_RESET = 0x08
__DFU_STATE_DFU_UPLOAD_IDLE = 0x09
__DFU_STATE_DFU_ERROR = 0x0A
_DFU_DESCRIPTOR_TYPE = 0x21
__DFU_STATUS_STR = {
__DFU_STATE_APP_IDLE: "STATE_APP_IDLE",
__DFU_STATE_APP_DETACH: "STATE_APP_DETACH",
__DFU_STATE_DFU_IDLE: "STATE_DFU_IDLE",
__DFU_STATE_DFU_DOWNLOAD_SYNC: "STATE_DFU_DOWNLOAD_SYNC",
__DFU_STATE_DFU_DOWNLOAD_BUSY: "STATE_DFU_DOWNLOAD_BUSY",
__DFU_STATE_DFU_DOWNLOAD_IDLE: "STATE_DFU_DOWNLOAD_IDLE",
__DFU_STATE_DFU_MANIFEST_SYNC: "STATE_DFU_MANIFEST_SYNC",
__DFU_STATE_DFU_MANIFEST: "STATE_DFU_MANIFEST",
__DFU_STATE_DFU_MANIFEST_WAIT_RESET: "STATE_DFU_MANIFEST_WAIT_RESET",
__DFU_STATE_DFU_UPLOAD_IDLE: "STATE_DFU_UPLOAD_IDLE",
__DFU_STATE_DFU_ERROR: "STATE_DFU_ERROR",
}
# USB device handle
__dev = None
# Configuration descriptor of the device
__cfg_descr = None
__verbose = None
# USB DFU interface
__DFU_INTERFACE = 0
if "length" in inspect.getfullargspec(usb.util.get_string).args:
# PyUSB 1.0.0.b1 has the length argument
def get_string(dev, index):
return usb.util.get_string(dev, 255, index)
else:
# PyUSB 1.0.0.b2 dropped the length argument
def get_string(dev, index):
return usb.util.get_string(dev, index)
def find_dfu_cfg_descr(descr):
if len(descr) == 9 and descr[0] == 9 and descr[1] == _DFU_DESCRIPTOR_TYPE:
nt = collections.namedtuple(
"CfgDescr",
[
"bLength",
"bDescriptorType",
"bmAttributes",
"wDetachTimeOut",
"wTransferSize",
"bcdDFUVersion",
],
)
return nt(*struct.unpack("<BBBHHH", bytearray(descr)))
return None
def init(**kwargs):
"""Initializes the found DFU device so that we can program it."""
global __dev, __cfg_descr
devices = get_dfu_devices(**kwargs)
if not devices:
raise ValueError("No DFU device found")
if len(devices) > 1:
raise ValueError("Multiple DFU devices found")
__dev = devices[0]
__dev.set_configuration()
# Claim DFU interface
usb.util.claim_interface(__dev, __DFU_INTERFACE)
# Find the DFU configuration descriptor, either in the device or interfaces
__cfg_descr = None
for cfg in __dev.configurations():
__cfg_descr = find_dfu_cfg_descr(cfg.extra_descriptors)
if __cfg_descr:
break
for itf in cfg.interfaces():
__cfg_descr = find_dfu_cfg_descr(itf.extra_descriptors)
if __cfg_descr:
break
# Get device into idle state
for attempt in range(4):
status = get_status()
if status == __DFU_STATE_DFU_IDLE:
break
elif status == __DFU_STATE_DFU_DOWNLOAD_IDLE or status == __DFU_STATE_DFU_UPLOAD_IDLE:
abort_request()
else:
clr_status()
def abort_request():
"""Sends an abort request."""
__dev.ctrl_transfer(0x21, __DFU_ABORT, 0, __DFU_INTERFACE, None, __TIMEOUT)
def clr_status():
"""Clears any error status (perhaps left over from a previous session)."""
__dev.ctrl_transfer(0x21, __DFU_CLRSTATUS, 0, __DFU_INTERFACE, None, __TIMEOUT)
def get_status():
"""Get the status of the last operation."""
stat = __dev.ctrl_transfer(0xA1, __DFU_GETSTATUS, 0, __DFU_INTERFACE, 6, 20000)
# firmware can provide an optional string for any error
if stat[5]:
message = get_string(__dev, stat[5])
if message:
print(message)
return stat[4]
def check_status(stage, expected):
status = get_status()
if status != expected:
raise SystemExit("DFU: %s failed (%s)" % (stage, __DFU_STATUS_STR.get(status, status)))
def mass_erase():
"""Performs a MASS erase (i.e. erases the entire device)."""
# Send DNLOAD with first byte=0x41
__dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, "\x41", __TIMEOUT)
# Execute last command
check_status("erase", __DFU_STATE_DFU_DOWNLOAD_BUSY)
# Check command state
check_status("erase", __DFU_STATE_DFU_DOWNLOAD_IDLE)
def page_erase(addr):
"""Erases a single page."""
if __verbose:
print("Erasing page: 0x%x..." % (addr))
# Send DNLOAD with first byte=0x41 and page address
buf = struct.pack("<BI", 0x41, addr)
__dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT)
# Execute last command
check_status("erase", __DFU_STATE_DFU_DOWNLOAD_BUSY)
# Check command state
check_status("erase", __DFU_STATE_DFU_DOWNLOAD_IDLE)
def set_address(addr):
"""Sets the address for the next operation."""
# Send DNLOAD with first byte=0x21 and page address
buf = struct.pack("<BI", 0x21, addr)
__dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT)
# Execute last command
check_status("set address", __DFU_STATE_DFU_DOWNLOAD_BUSY)
# Check command state
check_status("set address", __DFU_STATE_DFU_DOWNLOAD_IDLE)
def write_memory(addr, buf, progress=None, progress_addr=0, progress_size=0):
"""Writes a buffer into memory. This routine assumes that memory has
already been erased.
"""
xfer_count = 0
xfer_bytes = 0
xfer_total = len(buf)
xfer_base = addr
while xfer_bytes < xfer_total:
if __verbose and xfer_count % 512 == 0:
print(
"Addr 0x%x %dKBs/%dKBs..."
% (xfer_base + xfer_bytes, xfer_bytes // 1024, xfer_total // 1024)
)
if progress and xfer_count % 2 == 0:
progress(progress_addr, xfer_base + xfer_bytes - progress_addr, progress_size)
# Set mem write address
set_address(xfer_base + xfer_bytes)
# Send DNLOAD with fw data
chunk = min(__cfg_descr.wTransferSize, xfer_total - xfer_bytes)
__dev.ctrl_transfer(
0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE, buf[xfer_bytes : xfer_bytes + chunk], __TIMEOUT
)
# Execute last command
check_status("write memory", __DFU_STATE_DFU_DOWNLOAD_BUSY)
# Check command state
check_status("write memory", __DFU_STATE_DFU_DOWNLOAD_IDLE)
xfer_count += 1
xfer_bytes += chunk
def write_page(buf, xfer_offset):
"""Writes a single page. This routine assumes that memory has already
been erased.
"""
xfer_base = 0x08000000
# Set mem write address
set_address(xfer_base + xfer_offset)
# Send DNLOAD with fw data
__dev.ctrl_transfer(0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE, buf, __TIMEOUT)
# Execute last command
check_status("write memory", __DFU_STATE_DFU_DOWNLOAD_BUSY)
# Check command state
check_status("write memory", __DFU_STATE_DFU_DOWNLOAD_IDLE)
if __verbose:
print("Write: 0x%x " % (xfer_base + xfer_offset))
def exit_dfu():
"""Exit DFU mode, and start running the program."""
# Set jump address
set_address(0x08000000)
# Send DNLOAD with 0 length to exit DFU
__dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, None, __TIMEOUT)
try:
# Execute last command
if get_status() != __DFU_STATE_DFU_MANIFEST:
print("Failed to reset device")
# Release device
usb.util.dispose_resources(__dev)
except:
pass
def named(values, names):
"""Creates a dict with `names` as fields, and `values` as values."""
return dict(zip(names.split(), values))
def consume(fmt, data, names):
"""Parses the struct defined by `fmt` from `data`, stores the parsed fields
into a named tuple using `names`. Returns the named tuple, and the data
with the struct stripped off."""
size = struct.calcsize(fmt)
return named(struct.unpack(fmt, data[:size]), names), data[size:]
def cstring(string):
"""Extracts a null-terminated string from a byte array."""
return string.decode("utf-8").split("\0", 1)[0]
def compute_crc(data):
"""Computes the CRC32 value for the data passed in."""
return 0xFFFFFFFF & -zlib.crc32(data) - 1
def read_dfu_file(filename):
"""Reads a DFU file, and parses the individual elements from the file.
Returns an array of elements. Each element is a dictionary with the
following keys:
num - The element index.
address - The address that the element data should be written to.
size - The size of the element data.
data - The element data.
If an error occurs while parsing the file, then None is returned.
"""
print("File: {}".format(filename))
with open(filename, "rb") as fin:
data = fin.read()
crc = compute_crc(data[:-4])
elements = []
# Decode the DFU Prefix
#
# <5sBIB
# < little endian Endianness
# 5s char[5] signature "DfuSe"
# B uint8_t version 1
# I uint32_t size Size of the DFU file (without suffix)
# B uint8_t targets Number of targets
dfu_prefix, data = consume("<5sBIB", data, "signature version size targets")
print(
" %(signature)s v%(version)d, image size: %(size)d, "
"targets: %(targets)d" % dfu_prefix
)
for target_idx in range(dfu_prefix["targets"]):
# Decode the Image Prefix
#
# <6sBI255s2I
# < little endian Endianness
# 6s char[6] signature "Target"
# B uint8_t altsetting
# I uint32_t named Bool indicating if a name was used
# 255s char[255] name Name of the target
# I uint32_t size Size of image (without prefix)
# I uint32_t elements Number of elements in the image
img_prefix, data = consume(
"<6sBI255s2I", data, "signature altsetting named name " "size elements"
)
img_prefix["num"] = target_idx
if img_prefix["named"]:
img_prefix["name"] = cstring(img_prefix["name"])
else:
img_prefix["name"] = ""
print(
" %(signature)s %(num)d, alt setting: %(altsetting)s, "
'name: "%(name)s", size: %(size)d, elements: %(elements)d' % img_prefix
)
target_size = img_prefix["size"]
target_data = data[:target_size]
data = data[target_size:]
for elem_idx in range(img_prefix["elements"]):
# Decode target prefix
#
# <2I
# < little endian Endianness
# I uint32_t element Address
# I uint32_t element Size
elem_prefix, target_data = consume("<2I", target_data, "addr size")
elem_prefix["num"] = elem_idx
print(" %(num)d, address: 0x%(addr)08x, size: %(size)d" % elem_prefix)
elem_size = elem_prefix["size"]
elem_data = target_data[:elem_size]
target_data = target_data[elem_size:]
elem_prefix["data"] = elem_data
elements.append(elem_prefix)
if len(target_data):
print("target %d PARSE ERROR" % target_idx)
# Decode DFU Suffix
#
# <4H3sBI
# < little endian Endianness
# H uint16_t device Firmware version
# H uint16_t product
# H uint16_t vendor
# H uint16_t dfu 0x11a (DFU file format version)
# 3s char[3] ufd "UFD"
# B uint8_t len 16
# I uint32_t crc32 Checksum
dfu_suffix = named(
struct.unpack("<4H3sBI", data[:16]), "device product vendor dfu ufd len crc"
)
print(
" usb: %(vendor)04x:%(product)04x, device: 0x%(device)04x, "
"dfu: 0x%(dfu)04x, %(ufd)s, %(len)d, 0x%(crc)08x" % dfu_suffix
)
if crc != dfu_suffix["crc"]:
print("CRC ERROR: computed crc32 is 0x%08x" % crc)
return
data = data[16:]
if data:
print("PARSE ERROR")
return
return elements
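# Illustrative sketch (hypothetical file name): parsing a DFuse image and
# flashing its elements, assuming init() has already claimed a DFU device:
#
#   elements = read_dfu_file("firmware.dfu")
#   if elements:
#       write_elements(elements, mass_erase_used=False, progress=cli_progress)
#       exit_dfu()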
class FilterDFU(object):
"""Class for filtering USB devices to identify devices which are in DFU
mode.
"""
def __call__(self, device):
for cfg in device:
for intf in cfg:
return intf.bInterfaceClass == 0xFE and intf.bInterfaceSubClass == 1
def get_dfu_devices(*args, **kwargs):
"""Returns a list of USB devices which are currently in DFU mode.
Additional filters (like idProduct and idVendor) can be passed in
to refine the search.
"""
# Convert to list for compatibility with newer PyUSB
return list(usb.core.find(*args, find_all=True, custom_match=FilterDFU(), **kwargs))
def get_memory_layout(device):
"""Returns an array which identifies the memory layout. Each entry
of the array will contain a dictionary with the following keys:
addr - Address of this memory segment.
last_addr - Last address contained within the memory segment.
size - Size of the segment, in bytes.
num_pages - Number of pages in the segment.
page_size - Size of each page, in bytes.
"""
cfg = device[0]
intf = cfg[(0, 0)]
mem_layout_str = get_string(device, intf.iInterface)
mem_layout = mem_layout_str.split("/")
result = []
for mem_layout_index in range(1, len(mem_layout), 2):
addr = int(mem_layout[mem_layout_index], 0)
segments = mem_layout[mem_layout_index + 1].split(",")
seg_re = re.compile(r"(\d+)\*(\d+)(.)(.)")
for segment in segments:
seg_match = seg_re.match(segment)
num_pages = int(seg_match.groups()[0], 10)
page_size = int(seg_match.groups()[1], 10)
multiplier = seg_match.groups()[2]
if multiplier == "K":
page_size *= 1024
if multiplier == "M":
page_size *= 1024 * 1024
size = num_pages * page_size
last_addr = addr + size - 1
result.append(
named(
(addr, last_addr, size, num_pages, page_size),
"addr last_addr size num_pages page_size",
)
)
addr += size
return result
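# For reference (typical STM32F405 layout; actual values vary by device), an
# iInterface string such as
#   "@Internal Flash  /0x08000000/04*016Kg,01*064Kg,07*128Kg"
# parses into segments starting at 0x08000000: 4 pages of 16K, 1 page of 64K
# and 7 pages of 128K.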
def list_dfu_devices(*args, **kwargs):
"""Prints a lits of devices detected in DFU mode."""
devices = get_dfu_devices(*args, **kwargs)
if not devices:
raise SystemExit("No DFU capable devices found")
for device in devices:
print(
"Bus {} Device {:03d}: ID {:04x}:{:04x}".format(
device.bus, device.address, device.idVendor, device.idProduct
)
)
layout = get_memory_layout(device)
print("Memory Layout")
for entry in layout:
print(
" 0x{:x} {:2d} pages of {:3d}K bytes".format(
entry["addr"], entry["num_pages"], entry["page_size"] // 1024
)
)
def write_elements(elements, mass_erase_used, progress=None):
"""Writes the indicated elements into the target memory,
erasing as needed.
"""
mem_layout = get_memory_layout(__dev)
for elem in elements:
addr = elem["addr"]
size = elem["size"]
data = elem["data"]
elem_size = size
elem_addr = addr
if progress and elem_size:
progress(elem_addr, 0, elem_size)
while size > 0:
write_size = size
if not mass_erase_used:
for segment in mem_layout:
if addr >= segment["addr"] and addr <= segment["last_addr"]:
# We found the page containing the address we want to
# write, erase it
page_size = segment["page_size"]
page_addr = addr & ~(page_size - 1)
if addr + write_size > page_addr + page_size:
write_size = page_addr + page_size - addr
page_erase(page_addr)
break
write_memory(addr, data[:write_size], progress, elem_addr, elem_size)
data = data[write_size:]
addr += write_size
size -= write_size
if progress:
progress(elem_addr, addr - elem_addr, elem_size)
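# --- Hedged illustration (not part of the original module) ---
# The erase logic above aligns the current write address down to a page
# boundary with addr & ~(page_size - 1); for example, with 2 KiB pages:
#   0x08000850 & ~(0x800 - 1)  ->  0x08000800
# so only the bytes up to the end of that page are written before the next
# page is erased.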
def cli_progress(addr, offset, size):
"""Prints a progress report suitable for use on the command line."""
width = 25
done = offset * width // size
print(
"\r0x{:08x} {:7d} [{}{}] {:3d}% ".format(
addr, size, "=" * done, " " * (width - done), offset * 100 // size
),
end="",
)
try:
sys.stdout.flush()
except OSError:
pass # Ignore Windows CLI "WinError 87" on Python 3.6
if offset == size:
print("")
def main():
"""Test program for verifying this files functionality."""
global __verbose
# Parse CMD args
parser = argparse.ArgumentParser(description="DFU Python Util")
parser.add_argument(
"-l", "--list", help="list available DFU devices", action="store_true", default=False
)
parser.add_argument("--vid", help="USB Vendor ID", type=lambda x: int(x, 0), default=None)
parser.add_argument("--pid", help="USB Product ID", type=lambda x: int(x, 0), default=None)
parser.add_argument(
"-m", "--mass-erase", help="mass erase device", action="store_true", default=False
)
parser.add_argument(
"-u", "--upload", help="read file from DFU device", dest="path", default=False
)
parser.add_argument("-x", "--exit", help="Exit DFU", action="store_true", default=False)
parser.add_argument(
"-v", "--verbose", help="increase output verbosity", action="store_true", default=False
)
args = parser.parse_args()
__verbose = args.verbose
kwargs = {}
if args.vid:
kwargs["idVendor"] = args.vid
if args.pid:
kwargs["idProduct"] = args.pid
if args.list:
list_dfu_devices(**kwargs)
return
init(**kwargs)
command_run = False
if args.mass_erase:
print("Mass erase...")
mass_erase()
command_run = True
if args.path:
elements = read_dfu_file(args.path)
if not elements:
print("No data in dfu file")
return
print("Writing memory...")
write_elements(elements, args.mass_erase, progress=cli_progress)
print("Exiting DFU...")
exit_dfu()
command_run = True
if args.exit:
print("Exiting DFU...")
exit_dfu()
command_run = True
if command_run:
print("Finished")
else:
print("No command specified")
if __name__ == "__main__":
main()
| {
"content_hash": "dbebb94307ba51aadd203a7b9b8d8b8b",
"timestamp": "",
"source": "github",
"line_count": 619,
"max_line_length": 99,
"avg_line_length": 31.8562197092084,
"alnum_prop": 0.5789847355342563,
"repo_name": "adafruit/circuitpython",
"id": "ce34b08a58842b6b7c3a1c3899d8d9b98c6c8a74",
"size": "20006",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/pydfu.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "10241"
},
{
"name": "C",
"bytes": "18450191"
},
{
"name": "C++",
"bytes": "476"
},
{
"name": "CMake",
"bytes": "18203"
},
{
"name": "CSS",
"bytes": "316"
},
{
"name": "HTML",
"bytes": "10126"
},
{
"name": "JavaScript",
"bytes": "13854"
},
{
"name": "Jinja",
"bytes": "11034"
},
{
"name": "Makefile",
"bytes": "330832"
},
{
"name": "Python",
"bytes": "1423935"
},
{
"name": "Shell",
"bytes": "18681"
}
],
"symlink_target": ""
} |
"""
Middleware to replace the plain text message body of an error
response with one formatted so the client can parse it.
Based on pecan.middleware.errordocument
"""
import json
import six
from zun.common.i18n import _
class ParsableErrorMiddleware(object):
"""Replace error body with something the client can parse."""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
# Request for this state, modified by replace_start_response()
# and used when an error is being reported.
state = {}
def replacement_start_response(status, headers, exc_info=None):
"""Overrides the default response to make errors parsable."""
try:
status_code = int(status.split(' ')[0])
state['status_code'] = status_code
except (ValueError, TypeError): # pragma: nocover
raise Exception(_(
'ErrorDocumentMiddleware received an invalid '
'status %s') % status)
else:
if (state['status_code'] // 100) not in (2, 3):
# Remove some headers so we can replace them later
# when we have the full error message and can
# compute the length.
headers = [(h, v)
for (h, v) in headers
if h not in ('Content-Length', 'Content-Type')
]
# Save the headers in case we need to modify them.
state['headers'] = headers
return start_response(status, headers, exc_info)
app_iter = self.app(environ, replacement_start_response)
if (state['status_code'] // 100) not in (2, 3):
errs = []
for err_str in app_iter:
err = {}
try:
err = json.loads(err_str.decode('utf-8'))
except ValueError:
pass
if 'title' in err and 'description' in err:
title = err['title']
desc = err['description']
else:
title = ''
desc = ''
code = err['faultcode'].lower() if 'faultcode' in err else ''
errs.append({
'request_id': '',
'code': code,
'status': state['status_code'],
'title': title,
'detail': desc,
'links': []
})
body = [six.b(json.dumps({'errors': errs}))]
state['headers'].append(('Content-Type', 'application/json'))
state['headers'].append(('Content-Length', str(len(body[0]))))
else:
body = app_iter
return body
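# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of wrapping a bare WSGI callable so that error bodies
# are rewritten as JSON.  The inner app below is a stand-in for Zun's real
# WSGI pipeline, not part of it.
def _example_wrapped_app():
    def bare_app(environ, start_response):
        start_response('404 Not Found', [('Content-Type', 'text/plain')])
        return [b'{"title": "Not Found", "description": "no such resource"}']
    return ParsableErrorMiddleware(bare_app)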
| {
"content_hash": "bd1149c59779422add94f2cfd174040e",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 77,
"avg_line_length": 35.77777777777778,
"alnum_prop": 0.48274672187715667,
"repo_name": "kevin-zhaoshuai/zun",
"id": "1e02d42970760a1468dae3a525d9d987c48ef0e6",
"size": "3499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zun/api/middleware/parsable_error.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "431"
},
{
"name": "Python",
"bytes": "1238275"
},
{
"name": "Ruby",
"bytes": "656"
},
{
"name": "Shell",
"bytes": "21640"
}
],
"symlink_target": ""
} |
import functools
# Module symbols to export
__all__ = ('operators', 'get')
# List of built-in operators
operators = (
# Module name # Operator class to import
('attributes', ),
('url', 'UrlOperator'),
('body', 'BodyOperator'),
('json_body', 'JsonOperator'),
('method', 'MethodOperator'),
('header', 'HeaderOperator'),
('json_schema', 'JsonSchemaOperator'),
('content', 'ContentTypeOperator'),
('status', 'StatusOperator', 'OkStatusOperator',
'ServerErrorStatusOperator', 'BadRequestStatusOperator'),
)
def get():
"""
Loads the built-in operators into the global test engine.
"""
def reducer(acc, operator):
module, symbols = operator[0], operator[1:]
path = 'grappa_http.operators.{}'.format(module)
# Dynamically import modules
operator = __import__(path, None, None, symbols)
# Register operators in the test engine
return acc + [getattr(operator, symbol) for symbol in symbols]
return functools.reduce(reducer, operators, [])
| {
"content_hash": "4182425568177a9237477f880248590d",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 29.783783783783782,
"alnum_prop": 0.604355716878403,
"repo_name": "grappa-py/http",
"id": "06b31e47a2950e40c4894165df078d515221b3fe",
"size": "1126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grappa_http/operators/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1620"
},
{
"name": "Python",
"bytes": "57902"
}
],
"symlink_target": ""
} |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Algorithm.Framework")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Orders import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Selection import *
from Alphas.HistoricalReturnsAlphaModel import HistoricalReturnsAlphaModel
from QuantConnect.Algorithm.Framework.Execution import *
from QuantConnect.Algorithm.Framework.Risk import *
from Portfolio.BlackLittermanOptimizationPortfolioConstructionModel import *
from Portfolio.UnconstrainedMeanVariancePortfolioOptimizer import UnconstrainedMeanVariancePortfolioOptimizer
from Risk.NullRiskManagementModel import NullRiskManagementModel
### <summary>
### Black-Litterman framework algorithm
### Uses the HistoricalReturnsAlphaModel and the BlackLittermanPortfolioConstructionModel
### to create an algorithm that rebalances the portfolio according to Black-Litterman portfolio optimization
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="using quantconnect" />
### <meta name="tag" content="trading and orders" />
class BlackLittermanPortfolioOptimizationFrameworkAlgorithm(QCAlgorithm):
'''Black-Litterman Optimization algorithm.'''
def Initialize(self):
# Set requested data resolution
self.UniverseSettings.Resolution = Resolution.Minute
self.SetStartDate(2013,10,7) #Set Start Date
self.SetEndDate(2013,10,11) #Set End Date
self.SetCash(100000) #Set Strategy Cash
self.symbols = [ Symbol.Create(x, SecurityType.Equity, Market.USA) for x in [ 'AIG', 'BAC', 'IBM', 'SPY' ] ]
optimizer = UnconstrainedMeanVariancePortfolioOptimizer()
# set algorithm framework models
self.SetUniverseSelection(CoarseFundamentalUniverseSelectionModel(self.coarseSelector))
self.SetAlpha(HistoricalReturnsAlphaModel(resolution = Resolution.Daily))
self.SetPortfolioConstruction(BlackLittermanOptimizationPortfolioConstructionModel(optimizer = optimizer))
self.SetExecution(ImmediateExecutionModel())
self.SetRiskManagement(NullRiskManagementModel())
def coarseSelector(self, coarse):
# Drops SPY after the 8th
last = 3 if self.Time.day > 8 else len(self.symbols)
return self.symbols[0:last]
def OnOrderEvent(self, orderEvent):
if orderEvent.Status == OrderStatus.Filled:
self.Debug(orderEvent) | {
"content_hash": "7bae5b158cff0cfc1c71b774ec248d54",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 116,
"avg_line_length": 45.136986301369866,
"alnum_prop": 0.7705614567526555,
"repo_name": "Jay-Jay-D/LeanSTP",
"id": "c8b4fa0ad728fc597b1d54aa4ddf25e8b6e937a8",
"size": "3297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Algorithm.Python/BlackLittermanPortfolioOptimizationFrameworkAlgorithm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2540"
},
{
"name": "C#",
"bytes": "17013438"
},
{
"name": "Dockerfile",
"bytes": "1229"
},
{
"name": "F#",
"bytes": "1723"
},
{
"name": "HTML",
"bytes": "2607907"
},
{
"name": "Java",
"bytes": "852"
},
{
"name": "Jupyter Notebook",
"bytes": "22467"
},
{
"name": "Python",
"bytes": "852757"
},
{
"name": "Shell",
"bytes": "2307"
},
{
"name": "Visual Basic",
"bytes": "2448"
}
],
"symlink_target": ""
} |
"""209. Minimum Size Subarray Sum
https://leetcode.com/problems/minimum-size-subarray-sum/
Given an array of n positive integers and a positive integer s,
find the minimal length of a contiguous subarray of which the sum ≥ s.
If there isn't one, return 0 instead.
Example:
Input: s = 7, nums = [2,3,1,2,4,3]
Output: 2
Explanation: the subarray [4,3] has the minimal length under the problem constraint.
Follow up:
If you have figured out the O(n) solution, try coding another solution
of which the time complexity is O(n log n).
"""
from typing import List
class Solution:
def min_sub_arrayLen(self, s: int, nums: List[int]) -> int:
# double pointers
n = len(nums)
ans = n + 1
i, j, cur = 0, 0, 0
move_i = False
while i <= j < n:
if move_i:
cur -= nums[i - 1]
else:
cur += nums[j]
if cur >= s:
ans = min(ans, j - i + 1)
i += 1
move_i = True
else:
j += 1
move_i = False
return ans if ans <= n else 0
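# --- Hedged usage sketch (not part of the original solution) ---
# Quick sanity checks for the sliding-window method above; the expected
# values follow directly from the problem statement.
if __name__ == "__main__":
    solver = Solution()
    assert solver.min_sub_arrayLen(7, [2, 3, 1, 2, 4, 3]) == 2
    assert solver.min_sub_arrayLen(100, [1, 2, 3]) == 0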
| {
"content_hash": "4272d252bc5fb52c342fd89676ee26b1",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 84,
"avg_line_length": 27.463414634146343,
"alnum_prop": 0.5488454706927176,
"repo_name": "isudox/leetcode-solution",
"id": "392fdec4063bb20a18b9ac9b23565b6328452628",
"size": "1128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-algorithm/leetcode/problem_209.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groovy",
"bytes": "16121"
},
{
"name": "Java",
"bytes": "118043"
},
{
"name": "Python",
"bytes": "151015"
}
],
"symlink_target": ""
} |
import datetime
from django.db import connection, models
from django.core.management.color import no_style
from django.db.utils import DatabaseError
from south.db import generic
class DatabaseOperations(generic.DatabaseOperations):
backend_name = 'firebird'
alter_string_set_type = 'ALTER %(column)s TYPE %(type)s'
alter_string_set_default = 'ALTER %(column)s SET DEFAULT %(default)s;'
alter_string_drop_null = ''
add_column_string = 'ALTER TABLE %s ADD %s;'
allows_combined_alters = False
def _fill_constraint_cache(self, db_name, table_name):
self._constraint_cache.setdefault(db_name, {})
self._constraint_cache[db_name][table_name] = {}
rows = self.execute("""
SELECT
rc.RDB$CONSTRAINT_NAME,
rc.RDB$CONSTRAINT_TYPE,
cc.RDB$TRIGGER_NAME
FROM rdb$relation_constraints rc
JOIN rdb$check_constraints cc
ON rc.rdb$constraint_name = cc.rdb$constraint_name
WHERE rc.rdb$constraint_type = 'NOT NULL'
AND rc.rdb$relation_name = '%s'
""" % table_name)
for constraint, kind, column in rows:
self._constraint_cache[db_name][table_name].setdefault(column, set())
self._constraint_cache[db_name][table_name][column].add((kind, constraint))
return
def _alter_column_set_null(self, table_name, column_name, is_null):
sql = """
UPDATE RDB$RELATION_FIELDS SET RDB$NULL_FLAG = %(null_flag)s
WHERE RDB$FIELD_NAME = '%(column)s'
AND RDB$RELATION_NAME = '%(table_name)s'
"""
null_flag = 'NULL' if is_null else '1'
return sql % {
'null_flag': null_flag,
'column': column_name.upper(),
'table_name': table_name.upper()
}
def _column_has_default(self, params):
sql = """
SELECT a.RDB$DEFAULT_VALUE
FROM RDB$RELATION_FIELDS a
WHERE a.RDB$FIELD_NAME = '%(column)s'
AND a.RDB$RELATION_NAME = '%(table_name)s'
"""
value = self.execute(sql % params)
return True if value else False
def _alter_set_defaults(self, field, name, params, sqls):
"Subcommand of alter_column that sets default values (overrideable)"
# Next, set any default
if not field.null and field.has_default():
default = field.get_default()
sqls.append(('ALTER COLUMN %s SET DEFAULT %%s ' % (self.quote_name(name),), [default]))
elif self._column_has_default(params):
sqls.append(('ALTER COLUMN %s DROP DEFAULT' % (self.quote_name(name),), []))
@generic.invalidate_table_constraints
def create_table(self, table_name, fields):
qn = self.quote_name(table_name)
columns = []
autoinc_sql = ''
for field_name, field in fields:
col = self.column_sql(table_name, field_name, field)
if not col:
continue
#col = self.adj_column_sql(col)
columns.append(col)
if isinstance(field, models.AutoField):
autoinc_sql = connection.ops.autoinc_sql(table_name, field_name)
sql = 'CREATE TABLE %s (%s);' % (qn, ', '.join([col for col in columns]))
self.execute(sql)
if autoinc_sql:
self.execute(autoinc_sql[0])
self.execute(autoinc_sql[1])
def column_sql(self, table_name, field_name, field, tablespace='', with_name=True, field_prepared=False):
"""
Creates the SQL snippet for a column. Used by add_column and add_table.
"""
# If the field hasn't already been told its attribute name, do so.
if not field_prepared:
field.set_attributes_from_name(field_name)
        # hook for the field to do any resolution prior to its attributes being queried
if hasattr(field, 'south_init'):
field.south_init()
# Possible hook to fiddle with the fields (e.g. defaults & TEXT on MySQL)
field = self._field_sanity(field)
try:
sql = field.db_type(connection=self._get_connection())
except TypeError:
sql = field.db_type()
if sql:
# Some callers, like the sqlite stuff, just want the extended type.
if with_name:
field_output = [self.quote_name(field.column), sql]
else:
field_output = [sql]
if field.primary_key:
field_output.append('NOT NULL PRIMARY KEY')
elif field.unique:
# Just use UNIQUE (no indexes any more, we have delete_unique)
field_output.append('UNIQUE')
tablespace = field.db_tablespace or tablespace
if tablespace and getattr(self._get_connection().features, "supports_tablespaces", False) and field.unique:
# We must specify the index tablespace inline, because we
# won't be generating a CREATE INDEX statement for this field.
field_output.append(self._get_connection().ops.tablespace_sql(tablespace, inline=True))
sql = ' '.join(field_output)
sqlparams = ()
# if the field is "NOT NULL" and a default value is provided, create the column with it
# this allows the addition of a NOT NULL field to a table with existing rows
if not getattr(field, '_suppress_default', False):
if field.has_default():
default = field.get_default()
# If the default is actually None, don't add a default term
if default is not None:
# If the default is a callable, then call it!
if callable(default):
default = default()
# Now do some very cheap quoting. TODO: Redesign return values to avoid this.
if isinstance(default, basestring):
default = "'%s'" % default.replace("'", "''")
elif isinstance(default, (datetime.date, datetime.time, datetime.datetime)):
default = "'%s'" % default
# Escape any % signs in the output (bug #317)
if isinstance(default, basestring):
default = default.replace("%", "%%")
# Add it in
sql += " DEFAULT %s"
sqlparams = (default)
elif (not field.null and field.blank) or (field.get_default() == ''):
if field.empty_strings_allowed and self._get_connection().features.interprets_empty_strings_as_nulls:
sql += " DEFAULT ''"
# Error here would be nice, but doesn't seem to play fair.
#else:
# raise ValueError("Attempting to add a non null column that isn't character based without an explicit default value.")
# Firebird need set not null after of default value keyword
if not field.null:
field_output.append('NOT NULL')
if field.rel and self.supports_foreign_keys:
self.add_deferred_sql(
self.foreign_key_sql(
table_name,
field.column,
field.rel.to._meta.db_table,
field.rel.to._meta.get_field(field.rel.field_name).column
)
)
# Things like the contrib.gis module fields have this in 1.1 and below
if hasattr(field, 'post_create_sql'):
for stmt in field.post_create_sql(no_style(), table_name):
self.add_deferred_sql(stmt)
# In 1.2 and above, you have to ask the DatabaseCreation stuff for it.
# This also creates normal indexes in 1.1.
if hasattr(self._get_connection().creation, "sql_indexes_for_field"):
# Make a fake model to pass in, with only db_table
model = self.mock_model("FakeModelForGISCreation", table_name)
for stmt in self._get_connection().creation.sql_indexes_for_field(model, field, no_style()):
self.add_deferred_sql(stmt)
if sql:
return sql % sqlparams
else:
return None
def _drop_constraints(self, table_name, name, field):
if self.has_check_constraints:
check_constraints = self._constraints_affecting_columns(table_name, [name], "CHECK")
for constraint in check_constraints:
self.execute(self.delete_check_sql % {
'table': self.quote_name(table_name),
'constraint': self.quote_name(constraint),
})
# Drop or add UNIQUE constraint
unique_constraint = list(self._constraints_affecting_columns(table_name, [name], "UNIQUE"))
if field.unique and not unique_constraint:
self.create_unique(table_name, [name])
elif not field.unique and unique_constraint:
self.delete_unique(table_name, [name])
# Drop all foreign key constraints
try:
self.delete_foreign_key(table_name, name)
except ValueError:
# There weren't any
pass
@generic.invalidate_table_constraints
def alter_column(self, table_name, name, field, explicit_name=True, ignore_constraints=False):
"""
Alters the given column name so it will match the given field.
Note that conversion between the two by the database must be possible.
        Will not automatically add _id by default; to have this behaviour, pass
explicit_name=False.
@param table_name: The name of the table to add the column to
@param name: The name of the column to alter
@param field: The new field definition to use
"""
if self.dry_run:
return
        # hook for the field to do any resolution prior to its attributes being queried
if hasattr(field, 'south_init'):
field.south_init()
# Add _id or whatever if we need to
field.set_attributes_from_name(name)
if not explicit_name:
name = field.column
else:
field.column = name
if not ignore_constraints:
# Drop all check constraints. Note that constraints will be added back
# with self.alter_string_set_type and self.alter_string_drop_null.
self._drop_constraints(table_name, name, field)
# First, change the type
params = {
"column": self.quote_name(name),
"type": self._db_type_for_alter_column(field),
"table_name": table_name
}
# SQLs is a list of (SQL, values) pairs.
sqls = []
sqls_extra = []
# Only alter the column if it has a type (Geometry ones sometimes don't)
if params["type"] is not None:
sqls.append((self.alter_string_set_type % params, []))
# Add any field- and backend- specific modifications
self._alter_add_column_mods(field, name, params, sqls)
# Next, nullity: modified, firebird doesn't support DROP NOT NULL
sqls_extra.append(self._alter_column_set_null(table_name, name, field.null))
# Next, set any default
self._alter_set_defaults(field, name, params, sqls)
# Finally, actually change the column
if self.allows_combined_alters:
sqls, values = zip(*sqls)
self.execute(
"ALTER TABLE %s %s;" % (self.quote_name(table_name), ", ".join(sqls)),
generic.flatten(values),
)
else:
# Databases like e.g. MySQL don't like more than one alter at once.
for sql, values in sqls:
try:
self.execute("ALTER TABLE %s %s;" % (self.quote_name(table_name), sql), values)
except DatabaseError as e:
print e
# Execute extra sql, which don't need ALTER TABLE statement
for sql in sqls_extra:
self.execute(sql)
if not ignore_constraints:
# Add back FK constraints if needed
if field.rel and self.supports_foreign_keys:
self.execute(
self.foreign_key_sql(
table_name,
field.column,
field.rel.to._meta.db_table,
field.rel.to._meta.get_field(field.rel.field_name).column
)
)
| {
"content_hash": "0549def62adfd6499653b8a58f98b62a",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 142,
"avg_line_length": 41.372168284789645,
"alnum_prop": 0.5622653316645807,
"repo_name": "mozilla/make.mozilla.org",
"id": "7f47cf79242774dc927bbb8365201448bc3664b5",
"size": "12796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/south/db/firebird.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "334625"
},
{
"name": "Puppet",
"bytes": "14621"
},
{
"name": "Python",
"bytes": "3683223"
},
{
"name": "Ruby",
"bytes": "1462"
},
{
"name": "Shell",
"bytes": "4446"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('characters', '0002_alignment'),
]
operations = [
migrations.CreateModel(
name='Class',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('name', models.CharField(max_length=200)),
('description', models.TextField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Race',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('name', models.CharField(max_length=200)),
('description', models.TextField()),
],
options={
},
bases=(models.Model,),
),
]
| {
"content_hash": "ee0b73d3ad472806815b124a2b3c9095",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 114,
"avg_line_length": 29.37142857142857,
"alnum_prop": 0.5058365758754864,
"repo_name": "mpirnat/django-tutorial-v2",
"id": "fbdf75700c0bc20661afb443e1d24f7dabb0b948",
"size": "1052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "characters/migrations/0003_class_race.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23106"
}
],
"symlink_target": ""
} |
"""Common argument parsing-related code for unexpected pass finders."""
import logging
def AddCommonArguments(parser):
"""Adds arguments that are common to all unexpected pass finders.
Args:
parser: An argparse.ArgumentParser instance to add arguments to.
"""
parser.add_argument('--project',
required=True,
help='The billing project to use for BigQuery queries. '
'Must have access to the ResultDB BQ tables, e.g. '
'"chrome-luci-data.chromium.gpu_ci_test_results".')
parser.add_argument('--num-samples',
type=int,
default=100,
help='The number of recent builds to query.')
parser.add_argument('--output-format',
choices=[
'html',
'print',
],
default='html',
help='How to output script results.')
parser.add_argument('--remove-stale-expectations',
action='store_true',
default=False,
help='Automatically remove any expectations that are '
'determined to be stale from the expectation file.')
parser.add_argument('--modify-semi-stale-expectations',
action='store_true',
default=False,
help='If any semi-stale expectations are found, prompt '
'the user about the modification of each one.')
parser.add_argument('-v',
'--verbose',
action='count',
default=0,
help='Increase logging verbosity, can be passed multiple '
'times.')
parser.add_argument('-q',
'--quiet',
action='store_true',
default=False,
help='Disable logging for non-errors.')
parser.add_argument('--large-query-mode',
action='store_true',
default=False,
help='Run the script in large query mode. This incurs '
'a significant performance hit, but allows the use of '
'larger sample sizes on large test suites by partially '
'working around a hard memory limit in BigQuery.')
parser.add_argument('--expectation-grace-period',
type=int,
default=7,
help=('How many days old an expectation needs to be in '
'order to be a candidate for being removed or '
'modified. This prevents newly added expectations '
'from being removed before a sufficient amount of '
'data has been generated with the expectation '
'active. Set to a negative value to disable.'))
def SetLoggingVerbosity(args):
"""Sets logging verbosity based on parsed arguments.
Args:
args: Parsed arguments from an argparse.ArgumentParser.
"""
if args.quiet:
args.verbose = -1
verbosity_level = args.verbose
if verbosity_level == -1:
level = logging.ERROR
elif verbosity_level == 0:
level = logging.WARNING
elif verbosity_level == 1:
level = logging.INFO
else:
level = logging.DEBUG
logging.getLogger().setLevel(level)
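# --- Hedged usage sketch (not part of the original module) ---
# Shows how a finder script is expected to combine the two helpers above.
# The parser description and the sample argv are assumptions for illustration.
def _example_build_and_configure():
    import argparse
    parser = argparse.ArgumentParser(description='Example unexpected pass finder')
    AddCommonArguments(parser)
    args = parser.parse_args(['--project', 'example-billing-project', '-vv'])
    SetLoggingVerbosity(args)  # -vv maps to logging.DEBUG
    return args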
| {
"content_hash": "c183c6a5c84efc03bdc09fb466aa9b3c",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 80,
"avg_line_length": 41.92857142857143,
"alnum_prop": 0.5227143668370244,
"repo_name": "ric2b/Vivaldi-browser",
"id": "66abbd85f1a6d499b60d0b08999824f4f301a8cb",
"size": "3684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chromium/testing/unexpected_passes_common/argument_parsing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
try:
import wpilib
except ImportError:
from pyfrc import wpilib
class Drive(object):
'''
The sole interaction between the robot and its driving system
occurs here. Anything that wants to drive the robot must go
through this class.
'''
def __init__(self, robotDrive, analog_channel,gyro):
'''
Constructor.
:param robotDrive: a `wpilib.RobotDrive` object
'''
# set defaults here
self.ultraSonic = analog_channel
self.x = 0
self.y = 0
self.rotation = 0
self.gyro=gyro
self.angle_constant = .040
self.gyro_enabled = True
self.robotDrive = robotDrive
#
# Verb functions -- these functions do NOT talk to motors directly. This
# allows multiple callers in the loop to call our functions without
# conflicts.
#
def move(self, x, y, rotation):
'''
Causes the robot to move
:param x: The speed that the robot should drive in the X direction. 1 is right [-1.0..1.0]
:param y: The speed that the robot should drive in the Y direction. -1 is forward. [-1.0..1.0]
:param rotation: The rate of rotation for the robot that is completely independent of the translation. 1 is rotate to the right [-1.0..1.0]
'''
self.x = x
self.y = y
self.rotation = rotation
def closePosition(self):
'''returns true if the robot is in shooting range, false if it's not'''
volts = self.ultraSonic.GetAverageVoltage()
if volts <= 1.75 and volts >= 1.5:
return True
else:
return False
def set_gyro_enabled(self, value):
self.gyro_enabled = value
def return_gyro_angle(self):
return self.gyro.GetAngle()
def reset_gyro_angle(self):
self.gyro.Reset()
def set_angle_constant(self, constant):
'''Sets the constant that is used to determine the robot turning speed'''
self.angle_constant = constant
def angle_rotation(self, target_angle):
'''
Adjusts the robot so that it points at a particular angle. Returns True
if the robot is near the target angle, False otherwise
:param target_angle: Angle to point at, in degrees
:returns: True if near angle, False otherwise
'''
if not self.gyro_enabled:
return False
angleOffset = target_angle - self.return_gyro_angle()
if angleOffset < -1 or angleOffset > 1:
self.rotation = angleOffset*self.angle_constant
self.rotation = max(min(0.5, self.rotation), -0.5)
return False
return True
#
# Actually tells the motors to do something
#
def doit(self):
        '''Send the stored x/y/rotation values to the mecanum drive, then
        reset them so the robot stops unless move() is called again.'''
self.robotDrive.MecanumDrive_Cartesian(self.y, self.x, self.rotation*-1)
#print('x=%s, y=%s, r=%s ' % (self.x, self.y, self.rotation))
# by default, the robot shouldn't move
self.x = 0
self.y = 0
self.rotation = 0
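# --- Hedged illustration (not part of the original robot code) ---
# Mirrors the proportional turn logic in angle_rotation(): the gyro error is
# scaled by angle_constant and clamped to [-0.5, 0.5].  The sample offsets
# below are arbitrary.
def _example_rotation_command(angle_offset, angle_constant=0.040):
    rotation = angle_offset * angle_constant
    return max(min(0.5, rotation), -0.5)
# _example_rotation_command(90)  -> 0.5 (clamped)
# _example_rotation_command(10)  -> 0.4
# _example_rotation_command(-5)  -> -0.2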
| {
"content_hash": "83cf1ddbb6ff42016ba608831e7a1f5f",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 143,
"avg_line_length": 24.300884955752213,
"alnum_prop": 0.6711580480699199,
"repo_name": "frc1418/2014",
"id": "566830d92499e06e84dfd0cce1112b69edc6e6b6",
"size": "2747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robot/robot/src/components/drive.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AutoIt",
"bytes": "686"
},
{
"name": "Python",
"bytes": "307255"
}
],
"symlink_target": ""
} |
"""Script to generate the raw sub-package APIs
Basically just drives OpenGLGenerator with options to produce
the various modules we want...
"""
import os, sys, logging, re, compileall
import openglgenerator
from OpenGL import platform
try:
from OpenGL import GL
except (ImportError, AttributeError), err:
pass
# put our OpenGL directory on the search path, just in case...
sys.path.insert( 0, os.path.abspath( '..' ) )
log = logging.getLogger( 'generateraw' )
MODULE_DEFINITIONS = [
('GL', ('gl[A-Z0-9].*','GL_.*')),
('GLU',('glu[A-Z0-9].*','GLU[_a-z0-9].*')),
('GLUT', ('glut[A-Z0-9].*','GLUT[_a-z0-9].*')),
('GLE', None),
('GLX', None),
('WGL', ('wgl.*','WGL.*',)),
('AGL', None),
]
def filterModules( arguments ):
"""Filter the set of modules according to command-line options
Basically no args == do everything, otherwise only process modules
declared here...
"""
if arguments:
definitions = [
x for x in MODULE_DEFINITIONS
if x[0] in arguments
]
else:
definitions = MODULE_DEFINITIONS
return definitions
def main():
baseModules = [
'OpenGL.constants',
]
known_symbols = openglgenerator.OpenGLGenerator.loadKnownSymbols(
baseModules
)
definedSymbols = known_symbols.copy()
for (module,expressions) in filterModules( sys.argv[1:] ):
log.info( "Processing module: %s", module )
if expressions:
expressions = [re.compile(e) for e in expressions]
xmlFile = '%s.xml'%( module.lower(), )
directory = '../OpenGL/raw/%(module)s'%locals()
try:
os.makedirs( directory )
except OSError, err:
pass
constantsFile = os.path.join( directory, 'constants.py' )
rawFile = os.path.join( directory, '__init__.py' )
open( rawFile, 'w' ).close()
annotationsFile = os.path.join( directory, 'annotations.py' )
dll = getattr( platform, module, None )
if dll and os.path.isfile( xmlFile ):
log.info( "Found DLL: %s and have XML source file: %s", dll, xmlFile )
# first the constants file...
log.info( "Generating constants %s", constantsFile )
gen = openglgenerator.OpenGLGenerator(
open(constantsFile,'w'),
generate_comments = False,
searched_dlls = [ dll ],
known_symbols = definedSymbols,
module_header = '''"""Constants for OpenGL.%(module)s
Automatically generated by the generateraw script, do not edit!
"""
'''%locals(),
)
items = gen.load_typedefs( xmlFile , types = [
openglgenerator.codegenerator.typedesc.Variable, # ick!
], expressions = expressions)
gen.produce( items )
gen.output.close()
log.info( "Generating raw API %s", rawFile )
constantSymbols = gen.loadKnownSymbols(
['OpenGL.raw.%(module)s.constants'%locals()],
flags = gen.EXPORT_SYMBOL, # don't import, do export
doReload = True,
)
constantSymbols.update( definedSymbols )
constantSymbols.update( known_symbols )
gen = openglgenerator.OpenGLGenerator(
open(rawFile,'w'),
generate_comments = True,
searched_dlls = [ dll ],
known_symbols = constantSymbols,
module_header = '''# -*- coding: iso-8859-1 -*-
"""Raw (C-style) API for OpenGL.%(module)s
Automatically generated by the generateraw script, do not edit!
"""
from OpenGL.raw.%(module)s.constants import *
'''%locals(),
)
items = gen.load_typedefs( xmlFile, expressions = expressions )
gen.produce( items )
gen.output.close()
log.info( "Generating annotations %s", annotationsFile )
gen = openglgenerator.OpenGLGenerator(
open(annotationsFile,'w'),
generate_comments = True,
searched_dlls = [ dll ],
emitters = [ openglgenerator.OpenGLDecorator() ],
known_symbols = definedSymbols,
module_header = '''"""Array-size annotations for OpenGL.raw.%(module)s
Automatically generated by the generateraw script, do not edit!
"""
from OpenGL.raw import %(module)s as raw
'''%locals(),
)
items = gen.load_typedefs( xmlFile, types = [
openglgenerator.codegenerator.typedesc.Function, # ick!
], expressions = expressions)
gen.produce( items )
gen.output.close()
log.info( """Suppressing future output of already-defined functions/structures: %s""", module )
definedSymbols.update(
gen.loadKnownSymbols(
['OpenGL.raw.%(module)s'%locals()],
flags = 0, # neither import nor export from future operations...
doReload = True,
)
)
definedSymbols.update(
gen.loadKnownSymbols(
['OpenGL.raw.%(module)s.constants'%locals()],
flags = 0, # suppress future export of the constants
doReload = True,
)
)
definedSymbols.update( known_symbols )
if module == 'GL':
# filter out the higher GL version stuff as well...
# obviously you need to have the version stuff generated already
# to make this work!
for version in ('1_2','1_3','1_4','1_5','2_0'):
log.info( 'Suppressing exports from Core GL Version %s', version )
definedSymbols.update(
gen.loadKnownSymbols(
['OpenGL.raw.GL.VERSION.GL_%(version)s'%locals()],
flags = 0, # suppress future export of the constants
doReload = True,
)
)
path = '../OpenGL/raw/%(module)s'%locals()
log.info( 'Forcing recompilation of %s', path )
compileall.compile_dir(path, maxlevels=2, force=True, quiet=True)
if __name__ == "__main__":
logging.basicConfig()
#logging.getLogger( 'codegenerator' ).setLevel( logging.DEBUG )
log.setLevel( logging.INFO )
main()
| {
"content_hash": "c69ee6d1ec82f2926e90f7d659a82c81",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 107,
"avg_line_length": 38.38235294117647,
"alnum_prop": 0.5478927203065134,
"repo_name": "frederica07/Dragon_Programming_Process",
"id": "e0ab387d7617311c82254f337d99fb5f3677d0bb",
"size": "6548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyOpenGL-3.0.2/src/generateraw.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1548"
},
{
"name": "Python",
"bytes": "2558317"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.forms import ModelForm
@login_required
def member_view(request):
"""Return a rendered member view of member."""
all_meals = request.user.meal.all()
# import pdb; pdb.set_trace()
return render(request, 'member.html',context={
'username': request.user.username,
'first_name': request.user.first_name,
'last_name': request.user.last_name,
'email': request.user.email,
'meals': all_meals,
'follows': [follow for follow in request.user.member.following.all()],
'followers': [person for person in request.user.member.followers.all()],
})
class UserForm(ModelForm):
"""New user registration form"""
class Meta:
model = User
fields = [
'username',
'first_name',
'last_name',
'email']
@login_required
def edit_member_view(request):
"""Edit member view."""
template_name = 'edit_member.html'
user = request.user
form = UserForm(request.POST or None, instance=user)
if form.is_valid():
form.save()
return redirect('member')
return render(request, template_name, {'form': form})
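# --- Hedged wiring sketch (not part of the original module) ---
# The redirect('member') above assumes a URL pattern named 'member' exists;
# the paths below are illustrative guesses, not the project's actual urls.py.
#
#   from django.conf.urls import url
#   from members import views
#
#   urlpatterns = [
#       url(r'^member/$', views.member_view, name='member'),
#       url(r'^member/edit/$', views.edit_member_view, name='edit_member'),
#   ]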
| {
"content_hash": "3874e7c3b8899e46c946de51b0b2d002",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 80,
"avg_line_length": 30.72093023255814,
"alnum_prop": 0.6366389099167298,
"repo_name": "FoodLust/FL",
"id": "340c2db3f4dc3cafded54f120eacca77f94ceac0",
"size": "1321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "members/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1680"
},
{
"name": "HTML",
"bytes": "19129"
},
{
"name": "JavaScript",
"bytes": "536879"
},
{
"name": "Python",
"bytes": "46917"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
from scipy import stats
from numba import jit, prange
__all__ = ['bootci_nb',
'permtest_nb']
@jit(nopython=True, parallel=True, error_model='numpy')
def _bootstrap_jit(dat, statfunction, nstraps, nstats):
n = dat.shape[0]
res = np.zeros((nstraps, nstats))
for booti in range(nstraps):
rind = np.random.choice(np.arange(n), n)
res[booti, :] = statfunction(dat[rind, :])
"""Sort each stat independently"""
for stati in range(nstats):
res[:, stati].sort()
return res
'''
@jit(nopython=True, parallel=True, error_model='numpy')
def _jackknife_jit(dat, statfunction, nstats):
n = dat.shape[0]
jstats = np.zeros((n, nstats))
#jind = np.ones(n, dtype=np.bool_)
for i in prange(n):
jind = np.ones(n, dtype=np.bool_)
jind[i] = False
jstats[i, :] = statfunction(dat[jind, :])
#jind[i] = True
bca_accel = np.zeros(nstats)
for coli in range(nstats):
jmean = np.nanmean(jstats[:, coli])
bca_accel[coli] = np.nansum((jmean - jstats[:, coli])**3) / (6.0 * np.nansum((jmean - jstats[:, coli])**2)**1.5)
return bca_accel
'''
@jit(nopython=True, parallel=True, error_model='numpy')
def _jackknife_jit(dat, statfunction, nstats):
n = dat.shape[0]
jstats = np.zeros((n, nstats))
jind = np.ones(n, dtype=np.bool_)
for i in range(n):
jind[i] = False
jstats[i, :] = statfunction(dat[jind, :])
jind[i] = True
bca_accel = np.zeros(nstats)
for coli in range(nstats):
jmean = np.nanmean(jstats[:, coli])
bca_accel[coli] = np.nansum((jmean - jstats[:, coli])**3) / (6.0 * np.nansum((jmean - jstats[:, coli])**2)**1.5)
return bca_accel
def bootci_nb(dat, statfunction, alpha=0.05, n_samples=10000, method='bca'):
"""Estimate bootstrap CIs for a statfunction that operates along the rows of
a np.ndarray matrix and returns a np.ndarray vector of results.
Parameters
----------
dat : np.ndarray
Data that will be passed to statfunction as a single parameter.
statfunction : function
Function that should operate along the rows of dat and return a vector
alpha : float [0, 1]
Specify CI: [alpha/2, 1-alpha/2]
n_samples : int
Number of bootstrap samples.
method : str
Specify bias-corrected and accelerated ("bca") or percentile ("pi")
bootstrap.
Returns
-------
cis : np.ndarray [est, lcl, ucl] x [nstats]
Point-estimate and CI of statfunction of dat"""
ostat = statfunction(dat)
nstats = len(ostat)
alphas = np.array([alpha/2, 1-alpha/2])
"""boot_res.shape --> (n_samples, nstats)"""
boot_res = _bootstrap_jit(dat, statfunction, nstraps=n_samples, nstats=nstats)
if method == 'pi':
"""Percentile Interval Method
avals.shape --> (2, nstats)"""
avals = np.tile(alphas, (boot_res.shape[1], 1)).T
elif method == 'bca':
"""Bias-Corrected Accelerated Method
bca_accel.shape --> (nstats, )"""
bca_accel = _jackknife_jit(dat, statfunction, nstats)
z0 = stats.distributions.norm.ppf( (np.sum(boot_res < ostat[None, :], axis=0)) / np.sum(~np.isnan(boot_res), axis=0) )
zs = z0[None, :] + stats.distributions.norm.ppf(alphas).reshape(alphas.shape + (1,) * z0.ndim)
avals = stats.distributions.norm.cdf(z0[None, :] + zs / (1 - bca_accel[None, :] * zs))
non_nan_ind = ~np.isnan(boot_res)
nvals = np.round((np.sum(non_nan_ind, axis=0) - 1) * avals).astype(int)
"""cis.shape --> (nstats, 3)"""
cis = np.zeros((boot_res.shape[1], len(avals) + 1))
for i in range(boot_res.shape[1]):
cis[i, 0] = ostat[i]
if np.all(np.isnan(avals[:, i])):
print('No bootstrap variation in stat %d: LCL = UCL = observed stat' % (i))
cis[i, 1:1+len(alphas)] = ostat[i] * np.ones(len(alphas))
else:
cis[i, 1:1+len(alphas)] = boot_res[nvals[:, i], i]
if np.any(nvals[:, i] < 10) or np.any(nvals[:, i] > n_samples-10):
print('Extreme samples used for stat %d: [%d, %d]. Results unstable.' % (i, nvals[0,i], nvals[1,i]))
return cis
@jit(nopython=True, parallel=True, error_model='numpy')
def _perm_jit(d, sf, pcs, n):
res = sf(d)
samples = np.zeros((len(res), n))
"""Using prange here means we have to make a copy of d inside each loop
Cost is memory, but this should be fine with reasonably sized matrices.
Speed up is about 10x"""
for sampi in prange(n):
d_copy = d.copy()
rind = np.random.permutation(d_copy.shape[0])
for coli in pcs:
d_copy[:, coli] = d_copy[rind, coli]
samples[:, sampi] = sf(d_copy)
return samples
def permtest_nb(dat, statfunction, perm_cols, n_samples=9999, alternative='two-sided'):
"""Estimate p-values for the results of statfunction against the permutation null.
Parameters
----------
dat : np.ndarray matrix
Observed data required as sole input for statfunction.
statfunction : function
Operates on dat and returns a vector of statistics.
perm_cols : array of indices
Columns that need to be permuted in dat to generate a null dataset
n_samples : int
Number of permutations to test
alternative : str
Specify a "two-sided" test or one that tests that the observed data is "less" than
or "greater" than the null statistics.
Returns
-------
pvalue : float"""
    samples = _perm_jit(dat.copy(), statfunction, np.array(perm_cols, dtype=np.int64), int(n_samples))
if alternative == 'two-sided':
#pvalues = ((np.abs(samples) > np.abs(statfunction(dat)[None, :])).sum(axis=1) + 1) / (n_samples + 1)
pvalues = ((np.abs(samples) > np.abs(statfunction(dat)[:, None])).sum(axis=1) + 1) / (n_samples + 1)
    elif alternative == 'greater':
        pvalues = ((samples > statfunction(dat)[:, None]).sum(axis=1) + 1) / (n_samples + 1)
    elif alternative == 'less':
        pvalues = ((samples < statfunction(dat)[:, None]).sum(axis=1) + 1) / (n_samples + 1)
return pvalues
def _test_permtest(effect=0.5, n_samples=9999):
from scipy import stats
import time
dat = np.random.randn(1000, 5)
dat[:, 0] = np.random.randint(2, size=dat.shape[0])
dat[dat[:, 0] == 0, 1] = dat[dat[:, 0] == 0, 1] + effect
@jit(nopython=True)
def func(d):
return np.array([np.mean(d[d[:, 0] == 0, 1]) - np.mean(d[d[:, 0] == 1, 1])])
res = func(dat)
st = time.time()
res = permtest_nb(dat, func, perm_cols=[0], n_samples=n_samples)
et = (time.time() - st)
print(res)
print('Time: %1.2f sec' % et)
print(stats.ttest_ind(dat[dat[:, 0] == 0, 1], dat[dat[:, 0] == 1, 1]))
def _test_bootci(n_samples=10000, method='bca'):
import scikits.bootstrap as boot
import time
np.random.seed(110820)
dat = np.random.randn(1000, 5)
@jit(nopython=True)
def func(d):
return np.array([np.mean(d[:, 0]), np.median(d[:, 1]), np.max(d[:, 2])])
st = time.time()
res = bootci_nb(dat, func, alpha=0.05, n_samples=n_samples, method=method)
et = (time.time() - st)
print(res)
print('Time: %1.2f sec' % et)
st = time.time()
a = boot.ci(dat[:, 0], statfunction=np.mean, n_samples=n_samples, method=method)
b = boot.ci(dat[:, 1], statfunction=np.median, n_samples=n_samples, method=method)
c = boot.ci(dat[:, 2], statfunction=np.max, n_samples=n_samples, method=method)
et = (time.time() - st)
print('Mean_0', a)
print('Median_1', b)
print('Median_2', c)
print('Time: %1.2f sec' % et) | {
"content_hash": "4e405c04df8aeab5e2906e8e3fb43fa8",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 126,
"avg_line_length": 36.490566037735846,
"alnum_prop": 0.5919079627714581,
"repo_name": "agartland/utils",
"id": "ed9f2b46ed4eadbc91570add1ed0af8a1243ae49",
"size": "7736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bootstrap_nb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "892465"
},
{
"name": "R",
"bytes": "934"
}
],
"symlink_target": ""
} |
from graphviz import Digraph
import math
import pymongo
import argparse
import sys
def write_list_to_file(alist, filepath):
""" Simply write a python list out to a file """
with open(filepath, 'w') as file:
for item in alist:
file.write("{}\n".format(item))
def generate_graph(related_subs, subscribers, nsfw_subs, censored, full, min_subscribers, outfile):
""" Make a graphviz graph by adding edges for each sub and related subs """
g = Digraph('G', filename=outfile)
edges_added = 0
for key in related_subs:
for sub in related_subs[key]:
if not sub or not sub in subscribers:
continue
# In nsfw_subs and censored is mutually exclusive
if ((sub in nsfw_subs) != (censored)) or full:
subscriber_cnt = subscribers[sub]
# Filter: only include edge if sub has # subscribers
if subscriber_cnt >= min_subscribers:
g.edge(key, sub, weight=calculate_edge_weight(subscriber_cnt))
print("Edge count: " + str(edges_added))
edges_added += 1
g.save()
def calculate_edge_weight(subscriber_cnt):
""" Keep weights relatively small despite large subscriber disparities """
if subscriber_cnt == 0:
log_cnt = 0
else:
log_cnt = math.log2(subscriber_cnt)
return str(log_cnt)
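# --- Hedged illustration (not part of the original script) ---
# log2 keeps edge weights in a narrow band despite huge subscriber gaps:
#   calculate_edge_weight(1000)     -> '9.96...'
#   calculate_edge_weight(1000000)  -> '19.93...'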
def usage(parser):
""" Let the user know the expected runtime args """
if len(sys.argv) == 1:
parser.print_help()
sys.exit()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--censored', action='store_true',
help='Hide over 18 subreddits', default=False)
parser.add_argument('-m', '--minimum', type=int, default=100, required=True,
help='Min subcribers to be added')
parser.add_argument('-n', '--nsfw', action='store_true',
help='Only over 18 subreddits', default=False)
parser.add_argument('-v', '--verbose', action='store_true',
help='Show debugging', default=False)
usage(parser)
args = parser.parse_args()
client = pymongo.MongoClient()
db = client.reddit
related_subs = {}
subscribers = {}
nsfw_subs = {}
private = []
subreddits = db.subreddits.find({'type': 'subreddit'})
if subreddits:
for subreddit in subreddits:
title = subreddit['_id']
links = subreddit['linked']
if 'subscribers' in subreddit:
subscribers[title] = subreddit['subscribers']
if 'adult' in subreddit:
nsfw_subs[title] = True
if 'access' in subreddit:
if subreddit['access'] == 'private':
private.append(title)
related_subs[title] = links
write_list_to_file(private, 'private_subs.txt')
censored = False
full = False
# If censored and nsfw flags, opt for censored
if args.censored:
outfile = 'censored.gv'
censored = True
elif args.nsfw:
outfile = 'nsfw.gv'
else:
outfile = 'full.gv'
full = True
generate_graph(related_subs, subscribers, nsfw_subs,
censored, full, args.minimum, outfile)
if __name__ == '__main__':
main()
| {
"content_hash": "50f4639f8fcfb7baa54493eb7e6f33a4",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 99,
"avg_line_length": 27.958677685950413,
"alnum_prop": 0.5746378953591487,
"repo_name": "cdated/reddit-crawler",
"id": "98131a56e6e3d42405a2a683da9393c8f110faa6",
"size": "3431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grapher.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10157"
},
{
"name": "Shell",
"bytes": "42"
}
],
"symlink_target": ""
} |
import torch
import torchvision
import torch.nn as nn
import numpy as np
import torchvision.transforms as transforms
# ================================================================== #
# Table of Contents #
# ================================================================== #
# 1. Basic autograd example 1 (Line 25 to 39)
# 2. Basic autograd example 2 (Line 46 to 83)
# 3. Loading data from numpy (Line 90 to 97)
# 4. Input pipline (Line 104 to 129)
# 5. Input pipline for custom dataset (Line 136 to 156)
# 6. Pretrained model (Line 163 to 176)
# 7. Save and load model (Line 183 to 189)
# ================================================================== #
# 1. Basic autograd example 1 #
# ================================================================== #
# Create tensors.
x = torch.tensor(1., requires_grad=True)
w = torch.tensor(2., requires_grad=True)
b = torch.tensor(3., requires_grad=True)
# Build a computational graph.
y = w * x + b # y = 2 * x + 3
# Compute gradients.
y.backward()
# Print out the gradients.
print(x.grad) # x.grad = 2
print(w.grad) # w.grad = 1
print(b.grad) # b.grad = 1
# ================================================================== #
# 2. Basic autograd example 2 #
# ================================================================== #
# Create tensors of shape (10, 3) and (10, 2).
x = torch.randn(10, 3)
y = torch.randn(10, 2)
# Build a fully connected layer.
linear = nn.Linear(3, 2)
print ('w: ', linear.weight)
print ('b: ', linear.bias)
# Build loss function and optimizer.
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)
# Forward pass.
pred = linear(x)
# Compute loss.
loss = criterion(pred, y)
print('loss: ', loss.item())
# Backward pass.
loss.backward()
# Print out the gradients.
print ('dL/dw: ', linear.weight.grad)
print ('dL/db: ', linear.bias.grad)
# 1-step gradient descent.
optimizer.step()
# You can also perform gradient descent at the low level.
# linear.weight.data.sub_(0.01 * linear.weight.grad.data)
# linear.bias.data.sub_(0.01 * linear.bias.grad.data)
# Print out the loss after 1-step gradient descent.
pred = linear(x)
loss = criterion(pred, y)
print('loss after 1 step optimization: ', loss.item())
# ================================================================== #
# 3. Loading data from numpy #
# ================================================================== #
# Create a numpy array.
x = np.array([[1, 2], [3, 4]])
# Convert the numpy array to a torch tensor.
y = torch.from_numpy(x)
# Convert the torch tensor to a numpy array.
z = y.numpy()
# ================================================================== #
# 4. Input pipeline #
# ================================================================== #
# Download and construct CIFAR-10 dataset.
train_dataset = torchvision.datasets.CIFAR10(root='../../data/',
train=True,
transform=transforms.ToTensor(),
download=True)
# Fetch one data pair (read data from disk).
image, label = train_dataset[0]
print (image.size())
print (label)
# Data loader (this provides queues and threads in a very simple way).
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=64,
shuffle=True)
# When iteration starts, queue and thread start to load data from files.
data_iter = iter(train_loader)
# Mini-batch images and labels.
images, labels = next(data_iter)
# Actual usage of the data loader is as below.
for images, labels in train_loader:
# Training code should be written here.
pass
# ================================================================== #
# 5. Input pipeline for custom dataset #
# ================================================================== #
# You should build your custom dataset as below.
class CustomDataset(torch.utils.data.Dataset):
def __init__(self):
# TODO
# 1. Initialize file paths or a list of file names.
pass
def __getitem__(self, index):
# TODO
# 1. Read one data from file (e.g. using numpy.fromfile, PIL.Image.open).
# 2. Preprocess the data (e.g. torchvision.Transform).
# 3. Return a data pair (e.g. image and label).
pass
def __len__(self):
# You should change 0 to the total size of your dataset.
return 0
# You can then use the prebuilt data loader.
custom_dataset = CustomDataset()
train_loader = torch.utils.data.DataLoader(dataset=custom_dataset,
batch_size=64,
shuffle=True)
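# --- Hedged illustration (not part of the original tutorial) ---
# A minimal concrete version of the skeleton above, backed by in-memory
# tensors instead of files, just to show the three methods working together.
class TensorPairDataset(torch.utils.data.Dataset):
    def __init__(self, features, targets):
        self.features = features
        self.targets = targets
    def __getitem__(self, index):
        return self.features[index], self.targets[index]
    def __len__(self):
        return len(self.features)
example_dataset = TensorPairDataset(torch.randn(128, 3), torch.randint(0, 2, (128,)))
example_loader = torch.utils.data.DataLoader(dataset=example_dataset,
                                             batch_size=16,
                                             shuffle=True)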
# ================================================================== #
# 6. Pretrained model #
# ================================================================== #
# Download and load the pretrained ResNet-18.
resnet = torchvision.models.resnet18(pretrained=True)
# If you want to finetune only the top layer of the model, set as below.
for param in resnet.parameters():
param.requires_grad = False
# Replace the top layer for finetuning.
resnet.fc = nn.Linear(resnet.fc.in_features, 100) # 100 is an example.
# Forward pass.
images = torch.randn(64, 3, 224, 224)
outputs = resnet(images)
print (outputs.size()) # (64, 100)
# ================================================================== #
# 7. Save and load the model #
# ================================================================== #
# Save and load the entire model.
torch.save(resnet, 'model.ckpt')
model = torch.load('model.ckpt')
# Save and load only the model parameters (recommended).
torch.save(resnet.state_dict(), 'params.ckpt')
resnet.load_state_dict(torch.load('params.ckpt'))
| {
"content_hash": "885b20964e802370b55811527e2f908a",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 81,
"avg_line_length": 33.39153439153439,
"alnum_prop": 0.4821739819363017,
"repo_name": "yunjey/pytorch-tutorial",
"id": "744400c20d0f529d41b210cb755845f7b93d0d83",
"size": "6311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorials/01-basics/pytorch_basics/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "72066"
},
{
"name": "Shell",
"bytes": "449"
}
],
"symlink_target": ""
} |
import os
import tempfile
from django_seo_js import VERSION
from fabric.api import *
SITE_DIR = "site"
WHITELIST_DIRS = [".git", ]
WHITELIST_FILES = [".gitignore", ]
SANITY_CHECK_PROJECT_FILES = ["fabfile.py", "setup.py", "mkdocs.yml"]
SANITY_CHECK_BUILD_FILES = ["index.html", "js", "css"]
def _splitpath(path):
path = os.path.normpath(path)
return path.split(os.sep)
def tag_release():
# Tag the release:
local("git tag %s" % VERSION)
local("git push --tags")
def upload_release():
local("python setup.py sdist")
local("python setup.py bdist_wheel --universal")
local("twine upload dist/*")
local("rm dist/*")
def release():
# deploy_docs()
upload_release()
tag_release()
def deploy_docs():
# For someday move to mkdocs. Stolen verbatim from will.
# Sanity check dir.
root_dir = os.getcwd()
assert all([os.path.exists(os.path.join(root_dir, f)) for f in SANITY_CHECK_PROJECT_FILES])
local("rm -rf %s" % SITE_DIR)
local("mkdocs build")
tempdir = tempfile.mkdtemp()
local("mv %s/* %s" % (SITE_DIR, tempdir))
current_branch = local("git rev-parse --abbrev-ref HEAD", capture=True)
last_commit = local("git log -1 --pretty=\%B", capture=True)
# Add the new site to build
local("git checkout gh-pages")
# Sanity check dir.
root_dir = os.getcwd()
assert all([os.path.exists(os.path.join(root_dir, f)) for f in SANITY_CHECK_BUILD_FILES])
for root, dirs, files in os.walk(root_dir, topdown=False):
for name in files:
if name not in WHITELIST_FILES and not any([r in WHITELIST_DIRS for r in _splitpath(root)]):
# print "removing %s" % (os.path.join(root, name))
os.remove(os.path.join(root, name))
for name in dirs:
if name not in WHITELIST_DIRS and not any([r in WHITELIST_DIRS for r in _splitpath(root)]):
# print "removing %s" % (os.path.join(root, name))
os.rmdir(os.path.join(root, name))
local("cp -rv %s/* ." % tempdir)
with settings(warn_only=True):
result = local("git diff --exit-code")
if result.return_code != 0:
local("git add -A .")
local("git commit -m 'Auto-update of docs: %s'" % last_commit)
local("git push")
else:
print("No changes to the docs.")
local("git checkout %s" % current_branch)
| {
"content_hash": "a5420207c494daea516d0475f469346c",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 104,
"avg_line_length": 29.9875,
"alnum_prop": 0.6106711129637349,
"repo_name": "skoczen/django-seo-js",
"id": "ff90f4c9c2b53bad1635ec420b181de3c53f2cde",
"size": "2399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35135"
}
],
"symlink_target": ""
} |
from functools import namedtuple
Tile = namedtuple('Tile', ['top', 'bottom'])
def placeDominoTile(tiles, newTile):
    """Insert newTile into a chain of tiles so that each tile's bottom equals
    the next tile's top. Returns the updated list, or None when the tile
    cannot be placed anywhere."""
    if not tiles:
        return [newTile]
    elif len(tiles) == 1:
        # A single-tile chain: the new tile can be appended or prepended.
        if tiles[0].bottom == newTile.top:
            tiles.append(newTile)
            return tiles
        elif tiles[0].top == newTile.bottom:
            tiles.insert(0, newTile)
            return tiles
        return None
    else:
        for index, tile in enumerate(tiles):
            try:
                # Insert in the middle when the new tile links both neighbours.
                if tile.bottom == newTile.top and newTile.bottom == tiles[index + 1].top:
                    tiles.insert(index + 1, newTile)
                    return tiles
            except IndexError:
                # Last tile reached: append if the new tile chains onto the end.
                if tile.bottom == newTile.top:
                    tiles.append(newTile)
                    return tiles
        return None
print(placeDominoTile([], Tile(1, 1)))
print(placeDominoTile([Tile(1,1)], Tile(3,2)))
print(placeDominoTile([Tile(2,1)], Tile(3,2)))
print(placeDominoTile([Tile(1,3)], Tile(3,2)))
print(placeDominoTile([Tile(2,1), Tile(1,3), Tile(3,5), Tile(5,6)], Tile(3,3)))
print(placeDominoTile([Tile(2,1), Tile(1,3), Tile(3,5), Tile(5,6)], Tile(1,1)))
print(placeDominoTile([Tile(2,1), Tile(1,3), Tile(3,5), Tile(5,6)], Tile(6,4)))
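# Illustrative note (added): the calls above cover the main placement cases --
# starting a new chain, prepending or appending to a single tile, inserting in
# the middle of a longer chain, and appending at the end. Calls that cannot be
# placed (e.g. the second one) return None.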
def calculateDominoTiles(tiles):
    """Return the total number of pips in a valid chain, or -1 as soon as two
    neighbouring tiles do not match up."""
    res = 0
    for index, tile in enumerate(tiles):
        try:
            if tile.bottom != tiles[index + 1].top:
                return -1
        except IndexError:
            pass  # The last tile has no successor to check.
        res += tile.top + tile.bottom
    return res
print(calculateDominoTiles([Tile(3,3)]))
print(calculateDominoTiles([Tile(3,3), Tile(3,5)]))
print(calculateDominoTiles([Tile(3,3), Tile(3,4), Tile(5,1)])) | {
"content_hash": "487d52f83c68b1701d02a4f244a4a416",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 89,
"avg_line_length": 34.723404255319146,
"alnum_prop": 0.5808823529411765,
"repo_name": "adrianbeloqui/Python",
"id": "8a9bda69f6cda02482133d6bf3918782aa69e656",
"size": "1632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "domino_tiles_reworked.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19877"
}
],
"symlink_target": ""
} |
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.throttling import AnonRateThrottle, UserRateThrottle
from seaserv import seafile_api
from seahub import settings
from seahub.api2.utils import json_response, api_error
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.models import Token, TokenV2
class LogoutDeviceView(APIView):
"""Removes the api token of a device that has already logged in. If the device
is a desktop client, also remove all sync tokens of repos synced on that
client .
"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
throttle_classes = (UserRateThrottle,)
@json_response
def post(self, request, format=None):
auth_token = request.auth
if isinstance(auth_token, TokenV2) and auth_token.is_desktop_client():
seafile_api.delete_repo_tokens_by_peer_id(request.user.username, auth_token.device_id)
auth_token.delete()
return {}
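# Hedged usage sketch (not part of the original module; the endpoint path is an
# assumption that depends on the project's URL configuration):
#
#   import requests
#   requests.post("https://<seahub-host>/api2/logout-device/",
#                 headers={"Authorization": "Token <auth-token>"})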
| {
"content_hash": "5f12361ae87bf50be2ea364f12e75689",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 98,
"avg_line_length": 42.19230769230769,
"alnum_prop": 0.7529626253418414,
"repo_name": "cloudcopy/seahub",
"id": "123c08562c51a544ca5ca158861c5cef37ebde3b",
"size": "1097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seahub/api2/views_auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "231001"
},
{
"name": "HTML",
"bytes": "756152"
},
{
"name": "JavaScript",
"bytes": "2430927"
},
{
"name": "PLSQL",
"bytes": "16796"
},
{
"name": "Python",
"bytes": "1508638"
},
{
"name": "Shell",
"bytes": "9365"
}
],
"symlink_target": ""
} |
DATA_DIR = 'Data'
BACKUP_DIR = '.bkup'
USS_DATABASE_FILENAME = 'USSDatabase.csv'
CHANNEL_DATABASE_FILENAME = 'ChannelDatabase.csv'
| {
"content_hash": "29f5afc59afef4a4a2a873b22b5ffa27",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 49,
"avg_line_length": 32.75,
"alnum_prop": 0.7480916030534351,
"repo_name": "AdamRSterling/USS_StatsBot",
"id": "d4099a366c1a610f7a7b67a3c118c895953d87de",
"size": "131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FileDetails.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16131"
}
],
"symlink_target": ""
} |