pypll.py (tomvleeuwen/pypll, MIT license)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pypll.py
#
# Copyright (c) 2018 Tom van Leeuwen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# The PyPLL class can be used to keep the time in sync with an external
# time source. All you need to do to keep the system time in sync
# is pass the offset of the real time vs the local time (see main() below)
#
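# A minimal usage sketch, mirroring main() below; measure_offset() is a
# hypothetical callable that returns (actual_time - system_time) in seconds,
# e.g. obtained from NTP or GPS:
#
#     pll = PyPLL()
#     while True:
#         pll.process_offset(measure_offset())
#         time.sleep(16)
#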
import time
import logging
import adjtimex
# freq is ppm (parts per million) with a 16-bit fractional part (2^-16 ppm)
SEC_TO_FREQ = 65536000000
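# e.g. a drift of 1 ppm (1e-6 s/s) maps to freq = 1e-6 * SEC_TO_FREQ = 65536 (1 << 16).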
# Max offset in seconds before making a step.
MAX_OFFSET = 0.5
# Maximum offset to enter sync state
SYNC_OFFSET = 0.005
class PyPLL(object):
""" PyPLL
This is a wrapper around the adjtimex module that just accepts
an offset and keeps the system clock in sync.
@param max_offset: Make a timestep if the offset is larger than this value
@param sync_offset: Enter tracking if offset is smaller than this value.
"""
FREE_RUNNING=0
LOCKED=1
    def __init__(self, max_offset=MAX_OFFSET, sync_offset=SYNC_OFFSET):
        assert max_offset >= sync_offset
        self.state = self.FREE_RUNNING
        self.max_offset = max_offset
        self.sync_offset = sync_offset
def process_offset(self, offset):
""" process_offset
Wrapper function to help set up the system clock from a known time offset.
The first time this function is called, a step is made.
When offset is too large, a step is made again.
This function is basically all you need to keep the time in sync.
        @param offset: Time offset in seconds (actual_time - system_time)
return None
"""
# Unlock if the offset becomes too big. Something probably changed.
# And skip init if offset is low enough.
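        # Offsets in between leave the state unchanged, giving some hysteresis
        # between stepping (FREE_RUNNING) and slewing (LOCKED).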
        if abs(offset) > self.max_offset:
            self.state = self.FREE_RUNNING
        elif abs(offset) < self.sync_offset:
            self.state = self.LOCKED
if self.state == self.FREE_RUNNING:
logging.info("Making timestep of %e s", offset)
self.clear_time_state()
self.timestep(offset)
else:
logging.debug("Offset: %e", offset)
self.set_offset(offset)
def clear_time_state(self):
""" clear_time_state
Clears the time state so that the clock can be adjusted manually again.
return: None
"""
adjtimex.adjtimex(adjtimex.Timex(modes=adjtimex.ADJ_STATUS, status=adjtimex.STA_PLL))
adjtimex.adjtimex(adjtimex.Timex(modes=adjtimex.ADJ_STATUS))
def timestep(self, seconds = 0.0):
""" timestep
Makes a timestep using the provided seconds. Time will be added
to the system time so a positive value will make the clock go forward.
@param seconds: Number of seconds to adjust the system clock
return: None
"""
microseconds = int(round(seconds * 1000000))
seconds_int = int(microseconds // 1000000)
usec = int(microseconds - (seconds_int * 1000000))
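        # Floor division keeps usec in [0, 1000000): e.g. seconds = -0.25 gives
        # seconds_int = -1 and usec = 750000.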
timeval = adjtimex.Timeval(seconds_int, usec)
timeadj = adjtimex.Timex(modes=adjtimex.ADJ_SETOFFSET | adjtimex.ADJ_MICRO,
time=timeval)
adjtimex.adjtimex(timeadj)
def set_speed(self, factor=1.0):
""" set_speed
Sets the system frequency, obtained using get_speed.
@param factor : Speed of system clock, should be close to 1.0
return: None
"""
# Tick is always positive, we can round by adding 0.5
tick = int(factor * 10000 + 0.5)
remainder = factor - (tick / 10000.0)
freq = int(round(remainder * SEC_TO_FREQ))
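        # Illustrative example: factor = 1.000123 gives tick = 10001,
        # remainder = 2.3e-05 and freq = 1507328 (about 23 ppm << 16).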
timeadj = adjtimex.Timex(modes=adjtimex.ADJ_FREQUENCY | adjtimex.ADJ_TICK |
adjtimex.ADJ_STATUS,
freq = freq,
tick = tick,
status = adjtimex.STA_PLL)
adjtimex.adjtimex(timeadj)
def get_speed(self):
""" get_speed
        Gets the current system clock speed. The result can be passed to set_speed after a reboot.
return: Speed of system clock, close to 1.0.
"""
timeadj = adjtimex.Timex(modes=0)
adjtimex.adjtimex(timeadj)
speed = float(timeadj.tick) / 10000 + float(timeadj.freq) / SEC_TO_FREQ
return speed
def set_offset(self, offset=0.0):
""" set_offset
Passes the offset to the kernel, making the actual time PLL work.
        @param offset: Time offset in seconds (actual_time - system_time), as passed to the kernel
return: None
"""
offset_us = int(offset * 1000000)
timeadj = adjtimex.Timex(modes=adjtimex.ADJ_OFFSET | adjtimex.ADJ_STATUS |
adjtimex.ADJ_MICRO | adjtimex.ADJ_MAXERROR,
offset = offset_us,
maxerror = abs(offset_us),
status = adjtimex.STA_PLL)
adjtimex.adjtimex(timeadj)
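# A small sketch (not part of the original PyPLL API) of one way to persist the
# clock speed across reboots, as suggested by the get_speed() docstring. The
# state-file path and helper names below are arbitrary examples.
def save_speed(pll, path="/var/lib/pypll.speed"):
    """ Stores the current kernel clock speed so it can be restored after a reboot. """
    with open(path, "w") as state_file:
        state_file.write(repr(pll.get_speed()))
def restore_speed(pll, path="/var/lib/pypll.speed"):
    """ Re-applies a previously saved clock speed; does nothing if no value was saved. """
    try:
        with open(path) as state_file:
            pll.set_speed(float(state_file.read()))
    except (IOError, ValueError):
        pass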
def main():
# Test: Stay in lock with pool.ntp.org.
logging.basicConfig(level=logging.DEBUG)
import sys
import ntplib
pll=PyPLL()
ntp_client = ntplib.NTPClient()
# Tests get_speed and set_speed
speed = pll.get_speed()
print("Current frequency: %e" % speed)
pll.set_speed(speed)
# Basic usage:
# 1) Get offset (in this example using NTP),
# 2) Apply offset.
while True:
response = ntp_client.request('pool.ntp.org', version=3)
pll.process_offset(response.offset)
time.sleep(16)
if __name__ == "__main__":
main()
google/cloud/recommender/v1/recommender-v1-py/tests/unit/gapic/recommender_v1/test_recommender.py (googleapis/googleapis-gen, Apache-2.0 license)

# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.recommender_v1.services.recommender import RecommenderAsyncClient
from google.cloud.recommender_v1.services.recommender import RecommenderClient
from google.cloud.recommender_v1.services.recommender import pagers
from google.cloud.recommender_v1.services.recommender import transports
from google.cloud.recommender_v1.services.recommender.transports.base import _GOOGLE_AUTH_VERSION
from google.cloud.recommender_v1.types import insight
from google.cloud.recommender_v1.types import recommendation
from google.cloud.recommender_v1.types import recommender_service
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert RecommenderClient._get_default_mtls_endpoint(None) is None
assert RecommenderClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert RecommenderClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert RecommenderClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert RecommenderClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert RecommenderClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
RecommenderClient,
RecommenderAsyncClient,
])
def test_recommender_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'recommender.googleapis.com:443'
@pytest.mark.parametrize("transport_class,transport_name", [
(transports.RecommenderGrpcTransport, "grpc"),
(transports.RecommenderGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_recommender_client_service_account_always_use_jwt(transport_class, transport_name):
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [
RecommenderClient,
RecommenderAsyncClient,
])
def test_recommender_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'recommender.googleapis.com:443'
def test_recommender_client_get_transport_class():
transport = RecommenderClient.get_transport_class()
available_transports = [
transports.RecommenderGrpcTransport,
]
assert transport in available_transports
transport = RecommenderClient.get_transport_class("grpc")
assert transport == transports.RecommenderGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(RecommenderClient, transports.RecommenderGrpcTransport, "grpc"),
(RecommenderAsyncClient, transports.RecommenderGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(RecommenderClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RecommenderClient))
@mock.patch.object(RecommenderAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RecommenderAsyncClient))
def test_recommender_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(RecommenderClient, 'get_transport_class') as gtc:
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials()
)
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(RecommenderClient, 'get_transport_class') as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
(RecommenderClient, transports.RecommenderGrpcTransport, "grpc", "true"),
(RecommenderAsyncClient, transports.RecommenderGrpcAsyncIOTransport, "grpc_asyncio", "true"),
(RecommenderClient, transports.RecommenderGrpcTransport, "grpc", "false"),
(RecommenderAsyncClient, transports.RecommenderGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(RecommenderClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RecommenderClient))
@mock.patch.object(RecommenderAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RecommenderAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_recommender_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(RecommenderClient, transports.RecommenderGrpcTransport, "grpc"),
(RecommenderAsyncClient, transports.RecommenderGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_recommender_client_client_options_scopes(client_class, transport_class, transport_name):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(RecommenderClient, transports.RecommenderGrpcTransport, "grpc"),
(RecommenderAsyncClient, transports.RecommenderGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_recommender_client_client_options_credentials_file(client_class, transport_class, transport_name):
# Check the case credentials file is provided.
options = client_options.ClientOptions(
credentials_file="credentials.json"
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_recommender_client_client_options_from_dict():
with mock.patch('google.cloud.recommender_v1.services.recommender.transports.RecommenderGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = RecommenderClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_list_insights(transport: str = 'grpc', request_type=recommender_service.ListInsightsRequest):
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_insights),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommender_service.ListInsightsResponse(
next_page_token='next_page_token_value',
)
response = client.list_insights(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.ListInsightsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListInsightsPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_insights_from_dict():
test_list_insights(request_type=dict)
def test_list_insights_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_insights),
'__call__') as call:
client.list_insights()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.ListInsightsRequest()
@pytest.mark.asyncio
async def test_list_insights_async(transport: str = 'grpc_asyncio', request_type=recommender_service.ListInsightsRequest):
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_insights),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommender_service.ListInsightsResponse(
next_page_token='next_page_token_value',
))
response = await client.list_insights(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.ListInsightsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListInsightsAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_insights_async_from_dict():
await test_list_insights_async(request_type=dict)
def test_list_insights_field_headers():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommender_service.ListInsightsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_insights),
'__call__') as call:
call.return_value = recommender_service.ListInsightsResponse()
client.list_insights(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_insights_field_headers_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommender_service.ListInsightsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_insights),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommender_service.ListInsightsResponse())
await client.list_insights(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_insights_flattened():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_insights),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommender_service.ListInsightsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_insights(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
def test_list_insights_flattened_error():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_insights(
recommender_service.ListInsightsRequest(),
parent='parent_value',
)
@pytest.mark.asyncio
async def test_list_insights_flattened_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_insights),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommender_service.ListInsightsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommender_service.ListInsightsResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_insights(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_insights_flattened_error_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_insights(
recommender_service.ListInsightsRequest(),
parent='parent_value',
)
def test_list_insights_pager():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_insights),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
recommender_service.ListInsightsResponse(
insights=[
insight.Insight(),
insight.Insight(),
insight.Insight(),
],
next_page_token='abc',
),
recommender_service.ListInsightsResponse(
insights=[],
next_page_token='def',
),
recommender_service.ListInsightsResponse(
insights=[
insight.Insight(),
],
next_page_token='ghi',
),
recommender_service.ListInsightsResponse(
insights=[
insight.Insight(),
insight.Insight(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', ''),
)),
)
pager = client.list_insights(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, insight.Insight)
for i in results)
def test_list_insights_pages():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_insights),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
recommender_service.ListInsightsResponse(
insights=[
insight.Insight(),
insight.Insight(),
insight.Insight(),
],
next_page_token='abc',
),
recommender_service.ListInsightsResponse(
insights=[],
next_page_token='def',
),
recommender_service.ListInsightsResponse(
insights=[
insight.Insight(),
],
next_page_token='ghi',
),
recommender_service.ListInsightsResponse(
insights=[
insight.Insight(),
insight.Insight(),
],
),
RuntimeError,
)
pages = list(client.list_insights(request={}).pages)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_insights_async_pager():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_insights),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
recommender_service.ListInsightsResponse(
insights=[
insight.Insight(),
insight.Insight(),
insight.Insight(),
],
next_page_token='abc',
),
recommender_service.ListInsightsResponse(
insights=[],
next_page_token='def',
),
recommender_service.ListInsightsResponse(
insights=[
insight.Insight(),
],
next_page_token='ghi',
),
recommender_service.ListInsightsResponse(
insights=[
insight.Insight(),
insight.Insight(),
],
),
RuntimeError,
)
async_pager = await client.list_insights(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, insight.Insight)
for i in responses)
@pytest.mark.asyncio
async def test_list_insights_async_pages():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_insights),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
recommender_service.ListInsightsResponse(
insights=[
insight.Insight(),
insight.Insight(),
insight.Insight(),
],
next_page_token='abc',
),
recommender_service.ListInsightsResponse(
insights=[],
next_page_token='def',
),
recommender_service.ListInsightsResponse(
insights=[
insight.Insight(),
],
next_page_token='ghi',
),
recommender_service.ListInsightsResponse(
insights=[
insight.Insight(),
insight.Insight(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_insights(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
def test_get_insight(transport: str = 'grpc', request_type=recommender_service.GetInsightRequest):
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_insight),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = insight.Insight(
name='name_value',
description='description_value',
target_resources=['target_resources_value'],
insight_subtype='insight_subtype_value',
category=insight.Insight.Category.COST,
etag='etag_value',
)
response = client.get_insight(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.GetInsightRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, insight.Insight)
assert response.name == 'name_value'
assert response.description == 'description_value'
assert response.target_resources == ['target_resources_value']
assert response.insight_subtype == 'insight_subtype_value'
assert response.category == insight.Insight.Category.COST
assert response.etag == 'etag_value'
def test_get_insight_from_dict():
test_get_insight(request_type=dict)
def test_get_insight_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_insight),
'__call__') as call:
client.get_insight()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.GetInsightRequest()
@pytest.mark.asyncio
async def test_get_insight_async(transport: str = 'grpc_asyncio', request_type=recommender_service.GetInsightRequest):
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_insight),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(insight.Insight(
name='name_value',
description='description_value',
target_resources=['target_resources_value'],
insight_subtype='insight_subtype_value',
category=insight.Insight.Category.COST,
etag='etag_value',
))
response = await client.get_insight(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.GetInsightRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, insight.Insight)
assert response.name == 'name_value'
assert response.description == 'description_value'
assert response.target_resources == ['target_resources_value']
assert response.insight_subtype == 'insight_subtype_value'
assert response.category == insight.Insight.Category.COST
assert response.etag == 'etag_value'
@pytest.mark.asyncio
async def test_get_insight_async_from_dict():
await test_get_insight_async(request_type=dict)
def test_get_insight_field_headers():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommender_service.GetInsightRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_insight),
'__call__') as call:
call.return_value = insight.Insight()
client.get_insight(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_insight_field_headers_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommender_service.GetInsightRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_insight),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(insight.Insight())
await client.get_insight(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_get_insight_flattened():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_insight),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = insight.Insight()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_insight(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_get_insight_flattened_error():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_insight(
recommender_service.GetInsightRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_get_insight_flattened_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_insight),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = insight.Insight()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(insight.Insight())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_insight(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_insight_flattened_error_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_insight(
recommender_service.GetInsightRequest(),
name='name_value',
)
def test_mark_insight_accepted(transport: str = 'grpc', request_type=recommender_service.MarkInsightAcceptedRequest):
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_insight_accepted),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = insight.Insight(
name='name_value',
description='description_value',
target_resources=['target_resources_value'],
insight_subtype='insight_subtype_value',
category=insight.Insight.Category.COST,
etag='etag_value',
)
response = client.mark_insight_accepted(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.MarkInsightAcceptedRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, insight.Insight)
assert response.name == 'name_value'
assert response.description == 'description_value'
assert response.target_resources == ['target_resources_value']
assert response.insight_subtype == 'insight_subtype_value'
assert response.category == insight.Insight.Category.COST
assert response.etag == 'etag_value'
def test_mark_insight_accepted_from_dict():
test_mark_insight_accepted(request_type=dict)
def test_mark_insight_accepted_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_insight_accepted),
'__call__') as call:
client.mark_insight_accepted()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.MarkInsightAcceptedRequest()
@pytest.mark.asyncio
async def test_mark_insight_accepted_async(transport: str = 'grpc_asyncio', request_type=recommender_service.MarkInsightAcceptedRequest):
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_insight_accepted),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(insight.Insight(
name='name_value',
description='description_value',
target_resources=['target_resources_value'],
insight_subtype='insight_subtype_value',
category=insight.Insight.Category.COST,
etag='etag_value',
))
response = await client.mark_insight_accepted(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.MarkInsightAcceptedRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, insight.Insight)
assert response.name == 'name_value'
assert response.description == 'description_value'
assert response.target_resources == ['target_resources_value']
assert response.insight_subtype == 'insight_subtype_value'
assert response.category == insight.Insight.Category.COST
assert response.etag == 'etag_value'
@pytest.mark.asyncio
async def test_mark_insight_accepted_async_from_dict():
await test_mark_insight_accepted_async(request_type=dict)
def test_mark_insight_accepted_field_headers():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommender_service.MarkInsightAcceptedRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_insight_accepted),
'__call__') as call:
call.return_value = insight.Insight()
client.mark_insight_accepted(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_mark_insight_accepted_field_headers_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommender_service.MarkInsightAcceptedRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_insight_accepted),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(insight.Insight())
await client.mark_insight_accepted(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_mark_insight_accepted_flattened():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_insight_accepted),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = insight.Insight()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.mark_insight_accepted(
name='name_value',
state_metadata={'key_value': 'value_value'},
etag='etag_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
assert args[0].state_metadata == {'key_value': 'value_value'}
assert args[0].etag == 'etag_value'
def test_mark_insight_accepted_flattened_error():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.mark_insight_accepted(
recommender_service.MarkInsightAcceptedRequest(),
name='name_value',
state_metadata={'key_value': 'value_value'},
etag='etag_value',
)
@pytest.mark.asyncio
async def test_mark_insight_accepted_flattened_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_insight_accepted),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = insight.Insight()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(insight.Insight())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.mark_insight_accepted(
name='name_value',
state_metadata={'key_value': 'value_value'},
etag='etag_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
assert args[0].state_metadata == {'key_value': 'value_value'}
assert args[0].etag == 'etag_value'
@pytest.mark.asyncio
async def test_mark_insight_accepted_flattened_error_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.mark_insight_accepted(
recommender_service.MarkInsightAcceptedRequest(),
name='name_value',
state_metadata={'key_value': 'value_value'},
etag='etag_value',
)
def test_list_recommendations(transport: str = 'grpc', request_type=recommender_service.ListRecommendationsRequest):
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_recommendations),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommender_service.ListRecommendationsResponse(
next_page_token='next_page_token_value',
)
response = client.list_recommendations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.ListRecommendationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListRecommendationsPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_recommendations_from_dict():
test_list_recommendations(request_type=dict)
def test_list_recommendations_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_recommendations),
'__call__') as call:
client.list_recommendations()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.ListRecommendationsRequest()
@pytest.mark.asyncio
async def test_list_recommendations_async(transport: str = 'grpc_asyncio', request_type=recommender_service.ListRecommendationsRequest):
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_recommendations),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommender_service.ListRecommendationsResponse(
next_page_token='next_page_token_value',
))
response = await client.list_recommendations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.ListRecommendationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListRecommendationsAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_recommendations_async_from_dict():
await test_list_recommendations_async(request_type=dict)
def test_list_recommendations_field_headers():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommender_service.ListRecommendationsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_recommendations),
'__call__') as call:
call.return_value = recommender_service.ListRecommendationsResponse()
client.list_recommendations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_recommendations_field_headers_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommender_service.ListRecommendationsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_recommendations),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommender_service.ListRecommendationsResponse())
await client.list_recommendations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_recommendations_flattened():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_recommendations),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommender_service.ListRecommendationsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_recommendations(
parent='parent_value',
filter='filter_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].filter == 'filter_value'
def test_list_recommendations_flattened_error():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_recommendations(
recommender_service.ListRecommendationsRequest(),
parent='parent_value',
filter='filter_value',
)
@pytest.mark.asyncio
async def test_list_recommendations_flattened_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_recommendations),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommender_service.ListRecommendationsResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_recommendations(
parent='parent_value',
filter='filter_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].filter == 'filter_value'
@pytest.mark.asyncio
async def test_list_recommendations_flattened_error_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_recommendations(
recommender_service.ListRecommendationsRequest(),
parent='parent_value',
filter='filter_value',
)
def test_list_recommendations_pager():
client = RecommenderClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_recommendations),
'__call__') as call:
# Set the response to a series of pages.
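        # (The trailing RuntimeError below is a sentinel: it would only surface
        # if the pager requested more pages than this test supplies.)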
call.side_effect = (
recommender_service.ListRecommendationsResponse(
recommendations=[
recommendation.Recommendation(),
recommendation.Recommendation(),
recommendation.Recommendation(),
],
next_page_token='abc',
),
recommender_service.ListRecommendationsResponse(
recommendations=[],
next_page_token='def',
),
recommender_service.ListRecommendationsResponse(
recommendations=[
recommendation.Recommendation(),
],
next_page_token='ghi',
),
recommender_service.ListRecommendationsResponse(
recommendations=[
recommendation.Recommendation(),
recommendation.Recommendation(),
],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((
                ('parent', ''),
            )),
        )
pager = client.list_recommendations(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, recommendation.Recommendation)
for i in results)
def test_list_recommendations_pages():
client = RecommenderClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_recommendations),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
recommender_service.ListRecommendationsResponse(
recommendations=[
recommendation.Recommendation(),
recommendation.Recommendation(),
recommendation.Recommendation(),
],
next_page_token='abc',
),
recommender_service.ListRecommendationsResponse(
recommendations=[],
next_page_token='def',
),
recommender_service.ListRecommendationsResponse(
recommendations=[
recommendation.Recommendation(),
],
next_page_token='ghi',
),
recommender_service.ListRecommendationsResponse(
recommendations=[
recommendation.Recommendation(),
recommendation.Recommendation(),
],
),
RuntimeError,
)
pages = list(client.list_recommendations(request={}).pages)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_recommendations_async_pager():
client = RecommenderAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_recommendations),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
recommender_service.ListRecommendationsResponse(
recommendations=[
recommendation.Recommendation(),
recommendation.Recommendation(),
recommendation.Recommendation(),
],
next_page_token='abc',
),
recommender_service.ListRecommendationsResponse(
recommendations=[],
next_page_token='def',
),
recommender_service.ListRecommendationsResponse(
recommendations=[
recommendation.Recommendation(),
],
next_page_token='ghi',
),
recommender_service.ListRecommendationsResponse(
recommendations=[
recommendation.Recommendation(),
recommendation.Recommendation(),
],
),
RuntimeError,
)
async_pager = await client.list_recommendations(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, recommendation.Recommendation)
for i in responses)
@pytest.mark.asyncio
async def test_list_recommendations_async_pages():
client = RecommenderAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_recommendations),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
recommender_service.ListRecommendationsResponse(
recommendations=[
recommendation.Recommendation(),
recommendation.Recommendation(),
recommendation.Recommendation(),
],
next_page_token='abc',
),
recommender_service.ListRecommendationsResponse(
recommendations=[],
next_page_token='def',
),
recommender_service.ListRecommendationsResponse(
recommendations=[
recommendation.Recommendation(),
],
next_page_token='ghi',
),
recommender_service.ListRecommendationsResponse(
recommendations=[
recommendation.Recommendation(),
recommendation.Recommendation(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_recommendations(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
def test_get_recommendation(transport: str = 'grpc', request_type=recommender_service.GetRecommendationRequest):
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_recommendation),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommendation.Recommendation(
name='name_value',
description='description_value',
recommender_subtype='recommender_subtype_value',
etag='etag_value',
)
response = client.get_recommendation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.GetRecommendationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recommendation.Recommendation)
assert response.name == 'name_value'
assert response.description == 'description_value'
assert response.recommender_subtype == 'recommender_subtype_value'
assert response.etag == 'etag_value'
def test_get_recommendation_from_dict():
test_get_recommendation(request_type=dict)
def test_get_recommendation_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_recommendation),
'__call__') as call:
client.get_recommendation()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.GetRecommendationRequest()
@pytest.mark.asyncio
async def test_get_recommendation_async(transport: str = 'grpc_asyncio', request_type=recommender_service.GetRecommendationRequest):
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_recommendation),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommendation.Recommendation(
name='name_value',
description='description_value',
recommender_subtype='recommender_subtype_value',
etag='etag_value',
))
response = await client.get_recommendation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.GetRecommendationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recommendation.Recommendation)
assert response.name == 'name_value'
assert response.description == 'description_value'
assert response.recommender_subtype == 'recommender_subtype_value'
assert response.etag == 'etag_value'
@pytest.mark.asyncio
async def test_get_recommendation_async_from_dict():
await test_get_recommendation_async(request_type=dict)
def test_get_recommendation_field_headers():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommender_service.GetRecommendationRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_recommendation),
'__call__') as call:
call.return_value = recommendation.Recommendation()
client.get_recommendation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_recommendation_field_headers_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommender_service.GetRecommendationRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_recommendation),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommendation.Recommendation())
await client.get_recommendation(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_get_recommendation_flattened():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_recommendation),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommendation.Recommendation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_recommendation(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_get_recommendation_flattened_error():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_recommendation(
recommender_service.GetRecommendationRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_get_recommendation_flattened_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_recommendation),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommendation.Recommendation())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_recommendation(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_recommendation_flattened_error_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_recommendation(
recommender_service.GetRecommendationRequest(),
name='name_value',
)
def test_mark_recommendation_claimed(transport: str = 'grpc', request_type=recommender_service.MarkRecommendationClaimedRequest):
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_claimed),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommendation.Recommendation(
name='name_value',
description='description_value',
recommender_subtype='recommender_subtype_value',
etag='etag_value',
)
response = client.mark_recommendation_claimed(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.MarkRecommendationClaimedRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recommendation.Recommendation)
assert response.name == 'name_value'
assert response.description == 'description_value'
assert response.recommender_subtype == 'recommender_subtype_value'
assert response.etag == 'etag_value'
def test_mark_recommendation_claimed_from_dict():
test_mark_recommendation_claimed(request_type=dict)
def test_mark_recommendation_claimed_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_claimed),
'__call__') as call:
client.mark_recommendation_claimed()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.MarkRecommendationClaimedRequest()
@pytest.mark.asyncio
async def test_mark_recommendation_claimed_async(transport: str = 'grpc_asyncio', request_type=recommender_service.MarkRecommendationClaimedRequest):
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_claimed),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommendation.Recommendation(
name='name_value',
description='description_value',
recommender_subtype='recommender_subtype_value',
etag='etag_value',
))
response = await client.mark_recommendation_claimed(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.MarkRecommendationClaimedRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recommendation.Recommendation)
assert response.name == 'name_value'
assert response.description == 'description_value'
assert response.recommender_subtype == 'recommender_subtype_value'
assert response.etag == 'etag_value'
@pytest.mark.asyncio
async def test_mark_recommendation_claimed_async_from_dict():
await test_mark_recommendation_claimed_async(request_type=dict)
def test_mark_recommendation_claimed_field_headers():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommender_service.MarkRecommendationClaimedRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_claimed),
'__call__') as call:
call.return_value = recommendation.Recommendation()
client.mark_recommendation_claimed(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_mark_recommendation_claimed_field_headers_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommender_service.MarkRecommendationClaimedRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_claimed),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommendation.Recommendation())
await client.mark_recommendation_claimed(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_mark_recommendation_claimed_flattened():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_claimed),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommendation.Recommendation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.mark_recommendation_claimed(
name='name_value',
state_metadata={'key_value': 'value_value'},
etag='etag_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
assert args[0].state_metadata == {'key_value': 'value_value'}
assert args[0].etag == 'etag_value'
def test_mark_recommendation_claimed_flattened_error():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.mark_recommendation_claimed(
recommender_service.MarkRecommendationClaimedRequest(),
name='name_value',
state_metadata={'key_value': 'value_value'},
etag='etag_value',
)
@pytest.mark.asyncio
async def test_mark_recommendation_claimed_flattened_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_claimed),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommendation.Recommendation())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.mark_recommendation_claimed(
name='name_value',
state_metadata={'key_value': 'value_value'},
etag='etag_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
assert args[0].state_metadata == {'key_value': 'value_value'}
assert args[0].etag == 'etag_value'
@pytest.mark.asyncio
async def test_mark_recommendation_claimed_flattened_error_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.mark_recommendation_claimed(
recommender_service.MarkRecommendationClaimedRequest(),
name='name_value',
state_metadata={'key_value': 'value_value'},
etag='etag_value',
)
def test_mark_recommendation_succeeded(transport: str = 'grpc', request_type=recommender_service.MarkRecommendationSucceededRequest):
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_succeeded),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommendation.Recommendation(
name='name_value',
description='description_value',
recommender_subtype='recommender_subtype_value',
etag='etag_value',
)
response = client.mark_recommendation_succeeded(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.MarkRecommendationSucceededRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recommendation.Recommendation)
assert response.name == 'name_value'
assert response.description == 'description_value'
assert response.recommender_subtype == 'recommender_subtype_value'
assert response.etag == 'etag_value'
def test_mark_recommendation_succeeded_from_dict():
test_mark_recommendation_succeeded(request_type=dict)
def test_mark_recommendation_succeeded_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_succeeded),
'__call__') as call:
client.mark_recommendation_succeeded()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.MarkRecommendationSucceededRequest()
@pytest.mark.asyncio
async def test_mark_recommendation_succeeded_async(transport: str = 'grpc_asyncio', request_type=recommender_service.MarkRecommendationSucceededRequest):
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_succeeded),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommendation.Recommendation(
name='name_value',
description='description_value',
recommender_subtype='recommender_subtype_value',
etag='etag_value',
))
response = await client.mark_recommendation_succeeded(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.MarkRecommendationSucceededRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recommendation.Recommendation)
assert response.name == 'name_value'
assert response.description == 'description_value'
assert response.recommender_subtype == 'recommender_subtype_value'
assert response.etag == 'etag_value'
@pytest.mark.asyncio
async def test_mark_recommendation_succeeded_async_from_dict():
await test_mark_recommendation_succeeded_async(request_type=dict)
def test_mark_recommendation_succeeded_field_headers():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommender_service.MarkRecommendationSucceededRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_succeeded),
'__call__') as call:
call.return_value = recommendation.Recommendation()
client.mark_recommendation_succeeded(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_mark_recommendation_succeeded_field_headers_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommender_service.MarkRecommendationSucceededRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_succeeded),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommendation.Recommendation())
await client.mark_recommendation_succeeded(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_mark_recommendation_succeeded_flattened():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_succeeded),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommendation.Recommendation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.mark_recommendation_succeeded(
name='name_value',
state_metadata={'key_value': 'value_value'},
etag='etag_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
assert args[0].state_metadata == {'key_value': 'value_value'}
assert args[0].etag == 'etag_value'
def test_mark_recommendation_succeeded_flattened_error():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.mark_recommendation_succeeded(
recommender_service.MarkRecommendationSucceededRequest(),
name='name_value',
state_metadata={'key_value': 'value_value'},
etag='etag_value',
)
@pytest.mark.asyncio
async def test_mark_recommendation_succeeded_flattened_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_succeeded),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommendation.Recommendation())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.mark_recommendation_succeeded(
name='name_value',
state_metadata={'key_value': 'value_value'},
etag='etag_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
assert args[0].state_metadata == {'key_value': 'value_value'}
assert args[0].etag == 'etag_value'
@pytest.mark.asyncio
async def test_mark_recommendation_succeeded_flattened_error_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.mark_recommendation_succeeded(
recommender_service.MarkRecommendationSucceededRequest(),
name='name_value',
state_metadata={'key_value': 'value_value'},
etag='etag_value',
)
def test_mark_recommendation_failed(transport: str = 'grpc', request_type=recommender_service.MarkRecommendationFailedRequest):
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_failed),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommendation.Recommendation(
name='name_value',
description='description_value',
recommender_subtype='recommender_subtype_value',
etag='etag_value',
)
response = client.mark_recommendation_failed(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.MarkRecommendationFailedRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recommendation.Recommendation)
assert response.name == 'name_value'
assert response.description == 'description_value'
assert response.recommender_subtype == 'recommender_subtype_value'
assert response.etag == 'etag_value'
def test_mark_recommendation_failed_from_dict():
test_mark_recommendation_failed(request_type=dict)
def test_mark_recommendation_failed_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_failed),
'__call__') as call:
client.mark_recommendation_failed()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.MarkRecommendationFailedRequest()
@pytest.mark.asyncio
async def test_mark_recommendation_failed_async(transport: str = 'grpc_asyncio', request_type=recommender_service.MarkRecommendationFailedRequest):
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_failed),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommendation.Recommendation(
name='name_value',
description='description_value',
recommender_subtype='recommender_subtype_value',
etag='etag_value',
))
response = await client.mark_recommendation_failed(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == recommender_service.MarkRecommendationFailedRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recommendation.Recommendation)
assert response.name == 'name_value'
assert response.description == 'description_value'
assert response.recommender_subtype == 'recommender_subtype_value'
assert response.etag == 'etag_value'
@pytest.mark.asyncio
async def test_mark_recommendation_failed_async_from_dict():
await test_mark_recommendation_failed_async(request_type=dict)
def test_mark_recommendation_failed_field_headers():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommender_service.MarkRecommendationFailedRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_failed),
'__call__') as call:
call.return_value = recommendation.Recommendation()
client.mark_recommendation_failed(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_mark_recommendation_failed_field_headers_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recommender_service.MarkRecommendationFailedRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_failed),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommendation.Recommendation())
await client.mark_recommendation_failed(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_mark_recommendation_failed_flattened():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_failed),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recommendation.Recommendation()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.mark_recommendation_failed(
name='name_value',
state_metadata={'key_value': 'value_value'},
etag='etag_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
assert args[0].state_metadata == {'key_value': 'value_value'}
assert args[0].etag == 'etag_value'
def test_mark_recommendation_failed_flattened_error():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.mark_recommendation_failed(
recommender_service.MarkRecommendationFailedRequest(),
name='name_value',
state_metadata={'key_value': 'value_value'},
etag='etag_value',
)
@pytest.mark.asyncio
async def test_mark_recommendation_failed_flattened_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mark_recommendation_failed),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recommendation.Recommendation())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.mark_recommendation_failed(
name='name_value',
state_metadata={'key_value': 'value_value'},
etag='etag_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
assert args[0].state_metadata == {'key_value': 'value_value'}
assert args[0].etag == 'etag_value'
@pytest.mark.asyncio
async def test_mark_recommendation_failed_flattened_error_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.mark_recommendation_failed(
recommender_service.MarkRecommendationFailedRequest(),
name='name_value',
state_metadata={'key_value': 'value_value'},
etag='etag_value',
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.RecommenderGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.RecommenderGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RecommenderClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.RecommenderGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RecommenderClient(
client_options={"scopes": ["1", "2"]},
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.RecommenderGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = RecommenderClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.RecommenderGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.RecommenderGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize("transport_class", [
transports.RecommenderGrpcTransport,
transports.RecommenderGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.RecommenderGrpcTransport,
)
def test_recommender_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.RecommenderTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json"
)
def test_recommender_base_transport():
# Instantiate the base transport.
with mock.patch('google.cloud.recommender_v1.services.recommender.transports.RecommenderTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.RecommenderTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'list_insights',
'get_insight',
'mark_insight_accepted',
'list_recommendations',
'get_recommendation',
'mark_recommendation_claimed',
'mark_recommendation_succeeded',
'mark_recommendation_failed',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
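    # (Added note, not generated: the base transport only declares the RPC
    # surface; the concrete gRPC transports implement these methods, which is
    # why the bare base class raises NotImplementedError above.)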
@requires_google_auth_gte_1_25_0
def test_recommender_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.recommender_v1.services.recommender.transports.RecommenderTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RecommenderTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json",
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_recommender_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.recommender_v1.services.recommender.transports.RecommenderTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RecommenderTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json", scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
def test_recommender_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.recommender_v1.services.recommender.transports.RecommenderTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RecommenderTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_recommender_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
RecommenderClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_recommender_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
RecommenderClient()
adc.assert_called_once_with(
            scopes=('https://www.googleapis.com/auth/cloud-platform',),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.RecommenderGrpcTransport,
transports.RecommenderGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_recommender_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.RecommenderGrpcTransport,
transports.RecommenderGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_recommender_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
        adc.assert_called_once_with(
            scopes=(
                'https://www.googleapis.com/auth/cloud-platform',
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.RecommenderGrpcTransport, grpc_helpers),
(transports.RecommenderGrpcAsyncIOTransport, grpc_helpers_async)
],
)
def test_recommender_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(
quota_project_id="octopus",
scopes=["1", "2"]
)
create_channel.assert_called_with(
"recommender.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
scopes=["1", "2"],
default_host="recommender.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("transport_class", [transports.RecommenderGrpcTransport, transports.RecommenderGrpcAsyncIOTransport])
def test_recommender_grpc_transport_client_cert_source_for_mtls(
transport_class
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert,
private_key=expected_key
)
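# Illustrative sketch (added, not generated): the client_cert_source_callback
# used above is assumed to be a zero-argument callable returning a
# (certificate_chain, private_key) byte pair, matching the b"cert bytes" /
# b"key bytes" assertion in the channel mTLS test below. A minimal stand-in:
def example_client_cert_source_sketch():
    return b"cert bytes", b"key bytes"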
def test_recommender_host_no_port():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='recommender.googleapis.com'),
)
assert client.transport._host == 'recommender.googleapis.com:443'
def test_recommender_host_with_port():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='recommender.googleapis.com:8000'),
)
assert client.transport._host == 'recommender.googleapis.com:8000'
def test_recommender_grpc_transport_channel():
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.RecommenderGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_recommender_grpc_asyncio_transport_channel():
channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.RecommenderGrpcAsyncIOTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.RecommenderGrpcTransport, transports.RecommenderGrpcAsyncIOTransport])
def test_recommender_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.RecommenderGrpcTransport, transports.RecommenderGrpcAsyncIOTransport])
def test_recommender_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_insight_path():
project = "squid"
location = "clam"
insight_type = "whelk"
insight = "octopus"
expected = "projects/{project}/locations/{location}/insightTypes/{insight_type}/insights/{insight}".format(project=project, location=location, insight_type=insight_type, insight=insight, )
actual = RecommenderClient.insight_path(project, location, insight_type, insight)
assert expected == actual
def test_parse_insight_path():
expected = {
"project": "oyster",
"location": "nudibranch",
"insight_type": "cuttlefish",
"insight": "mussel",
}
path = RecommenderClient.insight_path(**expected)
# Check that the path construction is reversible.
actual = RecommenderClient.parse_insight_path(path)
assert expected == actual
def test_insight_type_path():
project = "winkle"
location = "nautilus"
insight_type = "scallop"
expected = "projects/{project}/locations/{location}/insightTypes/{insight_type}".format(project=project, location=location, insight_type=insight_type, )
actual = RecommenderClient.insight_type_path(project, location, insight_type)
assert expected == actual
def test_parse_insight_type_path():
expected = {
"project": "abalone",
"location": "squid",
"insight_type": "clam",
}
path = RecommenderClient.insight_type_path(**expected)
# Check that the path construction is reversible.
actual = RecommenderClient.parse_insight_type_path(path)
assert expected == actual
def test_recommendation_path():
project = "whelk"
location = "octopus"
recommender = "oyster"
recommendation = "nudibranch"
expected = "projects/{project}/locations/{location}/recommenders/{recommender}/recommendations/{recommendation}".format(project=project, location=location, recommender=recommender, recommendation=recommendation, )
actual = RecommenderClient.recommendation_path(project, location, recommender, recommendation)
assert expected == actual
def test_parse_recommendation_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
"recommender": "winkle",
"recommendation": "nautilus",
}
path = RecommenderClient.recommendation_path(**expected)
# Check that the path construction is reversible.
actual = RecommenderClient.parse_recommendation_path(path)
assert expected == actual
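# Illustrative sketch added next to the generated path-helper tests: the paths
# built above are what the request messages expect, e.g. as the `name` of a
# GetRecommendationRequest. The resource IDs below are illustrative placeholders.
def test_recommendation_path_as_request_name_sketch():
    name = RecommenderClient.recommendation_path(
        "my-project", "global", "my-recommender", "rec-123")
    request = recommender_service.GetRecommendationRequest(name=name)
    assert request.name == (
        "projects/my-project/locations/global/recommenders/my-recommender"
        "/recommendations/rec-123")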
def test_recommender_path():
project = "scallop"
location = "abalone"
recommender = "squid"
expected = "projects/{project}/locations/{location}/recommenders/{recommender}".format(project=project, location=location, recommender=recommender, )
actual = RecommenderClient.recommender_path(project, location, recommender)
assert expected == actual
def test_parse_recommender_path():
expected = {
"project": "clam",
"location": "whelk",
"recommender": "octopus",
}
path = RecommenderClient.recommender_path(**expected)
# Check that the path construction is reversible.
actual = RecommenderClient.parse_recommender_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = RecommenderClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = RecommenderClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = RecommenderClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder, )
actual = RecommenderClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = RecommenderClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = RecommenderClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization, )
actual = RecommenderClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = RecommenderClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = RecommenderClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project, )
actual = RecommenderClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = RecommenderClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = RecommenderClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = RecommenderClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = RecommenderClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = RecommenderClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.RecommenderTransport, '_prep_wrapped_messages') as prep:
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.RecommenderTransport, '_prep_wrapped_messages') as prep:
transport_class = RecommenderClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = RecommenderAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc_asyncio",
)
with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport
)
with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
'grpc',
]
for transport in transports:
client = RecommenderClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
| 38.318248 | 231 | 0.677354 |
7944c210ee7eab003def3a06bb13ba0b82f3c185 | 1,701 | py | Python | core/migrations/0001_initial.py | prashant111188/recipe-app-api | 8894ea7f8dc269583011368832876cc1fca8cc6a | ["MIT"] | null | null | null | core/migrations/0001_initial.py | prashant111188/recipe-app-api | 8894ea7f8dc269583011368832876cc1fca8cc6a | ["MIT"] | null | null | null | core/migrations/0001_initial.py | prashant111188/recipe-app-api | 8894ea7f8dc269583011368832876cc1fca8cc6a | ["MIT"] | null | null | null |
# Generated by Django 2.2.5 on 2020-03-29 13:57
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.029412 | 266 | 0.637272 |
7944c24e2721000c0ef6e01727787298bc6332dc | 707 | py | Python | examples/juniper/get-xnm-information.py | tcpcloud/debian-python-ncclient | fff818e60f069a286db4818e91c7fc6177d17028 | ["Apache-2.0"] | null | null | null | examples/juniper/get-xnm-information.py | tcpcloud/debian-python-ncclient | fff818e60f069a286db4818e91c7fc6177d17028 | ["Apache-2.0"] | null | null | null | examples/juniper/get-xnm-information.py | tcpcloud/debian-python-ncclient | fff818e60f069a286db4818e91c7fc6177d17028 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
from ncclient import manager
from ncclient.xml_ import *
def connect(host, port, user, password, source):
conn = manager.connect(host=host,
port=port,
username=user,
password=password,
timeout=600,
device_params = {'name':'junos'},
hostkey_verify=False)
rpc = """
<get-xnm-information>
<type>xml-schema</type>
<namespace>junos-configuration</namespace>
</get-xnm-information>"""
result = conn.rpc(rpc)
fh = open('schema.txt', 'w')
fh.write(result.tostring)
fh.close()
if __name__ == '__main__':
connect('router', 830, 'netconf', 'juniper!', 'candidate')
| 23.566667 | 62 | 0.591231 |
7944c3858e69fc26de106c7ab36e752e6c579296 | 1,418 | py | Python | cdir_cli/user_interface/scroll_position.py | EskelinenAntti/nav | a2e4ad8c5f1581bf47da0a390422235887c49fc7 | ["MIT"] | 16 | 2020-05-22T09:15:15.000Z | 2021-06-15T11:07:12.000Z | cdir_cli/user_interface/scroll_position.py | EskelinenAntti/nav | a2e4ad8c5f1581bf47da0a390422235887c49fc7 | ["MIT"] | 1 | 2020-05-25T12:00:29.000Z | 2020-05-28T13:46:57.000Z | cdir_cli/user_interface/scroll_position.py | EskelinenAntti/nav | a2e4ad8c5f1581bf47da0a390422235887c49fc7 | ["MIT"] | 1 | 2021-04-05T16:18:07.000Z | 2021-04-05T16:18:07.000Z |
class ScrollPosition():
    def __init__(self, visible_height, total_height):
        # Pass the heights in clear()'s (new_total_height, new_visible_height)
        # order so the two values are not swapped on construction.
        self.clear(total_height, visible_height)
def move_down(self):
self.last_visible_index = min(self.last_visible_index + 1,
self.__get_last_index())
def move_up(self):
self.last_visible_index = max(self.last_visible_index - 1,
0)
def update_visible_height(self, new_visible_height):
if not self.__visible_items_fill_screen(new_visible_height,
self.last_visible_index):
self.last_visible_index = new_visible_height - 1
self.visible_height = new_visible_height
def clear(self, new_total_height, new_visible_height):
self.last_visible_index = 0
self.total_height = new_total_height
self.visible_height = new_visible_height
def content_changed(self, new_total_height):
self.last_visible_index = 0
self.total_height = new_total_height
def get_first_visible_index(self):
return max(0, self.last_visible_index + 1 - self.visible_height)
def __get_last_index(self):
return self.total_height - 1
    def __visible_items_fill_screen(self, visible_height, last_visible_index):
        if visible_height < self.total_height:
            return last_visible_index + 1 >= visible_height
        # Assumed fallback (the original method ends above): when the visible
        # area is at least as tall as the content, everything fits on screen.
        return True
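# Illustrative usage sketch (not part of the original module): track a
# 3-line viewport over 10 items and scroll it one step.
#   position = ScrollPosition(visible_height=3, total_height=10)
#   position.move_down()
#   first = position.get_first_visible_index()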
| 36.358974 | 78 | 0.659379 |
7944c466a284a9a9a8f4305652fc4ae0775388d0 | 242 | py | Python | exercises/conditionals.py | yair19-meet/y2s18-python_review | 303ba7c2ffa32a095ab1630c1b4d0398a72771ff | ["MIT"] | null | null | null | exercises/conditionals.py | yair19-meet/y2s18-python_review | 303ba7c2ffa32a095ab1630c1b4d0398a72771ff | ["MIT"] | null | null | null | exercises/conditionals.py | yair19-meet/y2s18-python_review | 303ba7c2ffa32a095ab1630c1b4d0398a72771ff | ["MIT"] | null | null | null |
# Write your solution for 1.2 here!
sum = 0
# for i in range(101):
# if i % 2 == 0:
# sum = sum + i
# print(sum)
for i in range(1000, 0, -1):
if i % 6 == 2 and i*i % 5 == 3:
print (i)
break
| 16.133333 | 35 | 0.429752 |
7944c4fd26e2ce8c2ce8560ff85294d19d8d3571 | 21,586 | py | Python | ax/plot/helper.py | gbmarc1/Ax | 9428fa64a621cf4562c7e2c63881a0ca2fa2780b | ["MIT"] | null | null | null | ax/plot/helper.py | gbmarc1/Ax | 9428fa64a621cf4562c7e2c63881a0ca2fa2780b | ["MIT"] | null | null | null | ax/plot/helper.py | gbmarc1/Ax | 9428fa64a621cf4562c7e2c63881a0ca2fa2780b | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
from collections import Counter
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import numpy as np
from ax.core.generator_run import GeneratorRun
from ax.core.observation import ObservationFeatures
from ax.core.parameter import ChoiceParameter, FixedParameter, RangeParameter
from ax.core.types import TParameterization
from ax.modelbridge.base import ModelBridge
from ax.modelbridge.transforms.ivw import IVW
from ax.plot.base import DECIMALS, PlotData, PlotInSampleArm, PlotOutOfSampleArm, Z
from ax.utils.common.logger import get_logger
logger = get_logger(name="PlotHelper")
# Typing alias
RawData = List[Dict[str, Union[str, float]]]
TNullableGeneratorRunsDict = Optional[Dict[str, GeneratorRun]]
def _format_dict(param_dict: TParameterization, name: str = "Parameterization") -> str:
"""Format a dictionary for labels.
Args:
param_dict: Dictionary to be formatted
name: String name of the thing being formatted.
Returns: stringified blob.
"""
if len(param_dict) >= 10:
blob = "{} has too many items to render on hover ({}).".format(
name, len(param_dict)
)
else:
blob = "<br><em>{}:</em><br>{}".format(
name, "<br>".join("{}: {}".format(n, v) for n, v in param_dict.items())
)
return blob
def _wrap_metric(metric_name: str) -> str:
"""Put a newline on "::" for metric names.
Args:
metric_name: metric name.
Returns: wrapped metric name.
"""
if "::" in metric_name:
return "<br>".join(metric_name.split("::"))
else:
return metric_name
def _format_CI(estimate: float, sd: float, relative: bool, zval: float = Z) -> str:
"""Format confidence intervals given estimate and standard deviation.
Args:
estimate: point estimate.
sd: standard deviation of point estimate.
relative: if True, '%' is appended.
zval: z-value associated with desired CI (e.g. 1.96 for 95% CIs)
Returns: formatted confidence interval.
"""
return "[{lb:.{digits}f}{perc}, {ub:.{digits}f}{perc}]".format(
lb=estimate - zval * sd,
ub=estimate + zval * sd,
digits=DECIMALS,
perc="%" if relative else "",
)
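# Illustrative call (not part of the original module): _format_CI(0.5, 0.1, relative=False)
# renders "[0.30, 0.70]" when Z is ~1.96 and DECIMALS is 2; with relative=True a "%"
# suffix is appended to both bounds.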
def arm_name_to_tuple(arm_name: str) -> Union[Tuple[int, int], Tuple[int]]:
tup = arm_name.split("_")
if len(tup) == 2:
try:
return (int(tup[0]), int(tup[1]))
except ValueError:
return (0,)
return (0,)
def resize_subtitles(figure: Dict[str, Any], size: int):
for ant in figure["layout"]["annotations"]:
ant["font"].update(size=size)
return figure
def _filter_dict(
param_dict: TParameterization, subset_keys: List[str]
) -> TParameterization:
"""Filter a dictionary to keys present in a given list."""
return {k: v for k, v in param_dict.items() if k in subset_keys}
def _get_in_sample_arms(
model: ModelBridge,
metric_names: Set[str],
fixed_features: Optional[ObservationFeatures] = None,
) -> Tuple[Dict[str, PlotInSampleArm], RawData, Dict[str, TParameterization]]:
"""Get in-sample arms from a model with observed and predicted values
for specified metrics.
Returns a PlotInSampleArm object in which repeated observations are merged
with IVW, and a RawData object in which every observation is listed.
Fixed features input can be used to override fields of the insample arms
when making model predictions.
Args:
model: An instance of the model bridge.
metric_names: Restrict predictions to these metrics. If None, uses all
metrics in the model.
Returns:
A tuple containing
- Map from arm name to PlotInSampleArm.
- List of the data for each observation like::
{'metric_name': 'likes', 'arm_name': '0_0', 'mean': 1., 'sem': 0.1}
- Map from arm name to parameters
"""
observations = model.get_training_data()
# Calculate raw data
raw_data = []
cond_name_to_parameters = {}
for obs in observations:
cond_name_to_parameters[obs.arm_name] = obs.features.parameters
for j, metric_name in enumerate(obs.data.metric_names):
if metric_name in metric_names:
raw_data.append(
{
"metric_name": metric_name,
"arm_name": obs.arm_name,
"mean": obs.data.means[j],
"sem": np.sqrt(obs.data.covariance[j, j]),
}
)
# Check that we have one ObservationFeatures per arm name since we
# key by arm name and the model is not Multi-task.
# If "TrialAsTask" is present, one of the arms is also chosen.
if ("TrialAsTask" not in model.transforms.keys()) and (
len(cond_name_to_parameters) != len(observations)
):
logger.error(
"Have observations of arms with different features but same"
" name. Arbitrary one will be plotted."
)
# Merge multiple measurements within each Observation with IVW to get
# un-modeled prediction
t = IVW(None, [], [])
obs_data = t.transform_observation_data([obs.data for obs in observations], [])
# Start filling in plot data
in_sample_plot: Dict[str, PlotInSampleArm] = {}
for i, obs in enumerate(observations):
if obs.arm_name is None:
raise ValueError("Observation must have arm name for plotting.")
# Extract raw measurement
obs_y = {}
obs_se = {}
# Use the IVW data, not obs.data
for j, metric_name in enumerate(obs_data[i].metric_names):
if metric_name in metric_names:
obs_y[metric_name] = obs_data[i].means[j]
obs_se[metric_name] = np.sqrt(obs_data[i].covariance[j, j])
# Make a prediction.
if model.training_in_design[i]:
features = obs.features
if fixed_features is not None:
features.update_features(fixed_features)
pred_y, pred_se = _predict_at_point(model, features, metric_names)
else:
# Use raw data for out-of-design points
pred_y = obs_y
pred_se = obs_se
# pyre-fixme[6]: Expected `str` for 1st param but got `Optional[str]`.
in_sample_plot[obs.arm_name] = PlotInSampleArm(
# pyre-fixme[6]: Expected `str` for 1st param but got `Optional[str]`.
name=obs.arm_name,
y=obs_y,
se=obs_se,
parameters=obs.features.parameters,
y_hat=pred_y,
se_hat=pred_se,
context_stratum=None,
)
return in_sample_plot, raw_data, cond_name_to_parameters
def _predict_at_point(
model: ModelBridge, obsf: ObservationFeatures, metric_names: Set[str]
) -> Tuple[Dict[str, float], Dict[str, float]]:
"""Make a prediction at a point.
Returns mean and standard deviation in format expected by plotting.
Args:
model: ModelBridge
obsf: ObservationFeatures for which to predict
metric_names: Limit predictions to these metrics.
Returns:
A tuple containing
- Map from metric name to prediction.
- Map from metric name to standard error.
"""
y_hat = {}
se_hat = {}
f_pred, cov_pred = model.predict([obsf])
for metric_name in f_pred:
if metric_name in metric_names:
y_hat[metric_name] = f_pred[metric_name][0]
se_hat[metric_name] = np.sqrt(cov_pred[metric_name][metric_name][0])
return y_hat, se_hat
def _get_out_of_sample_arms(
model: ModelBridge,
generator_runs_dict: Dict[str, GeneratorRun],
metric_names: Set[str],
fixed_features: Optional[ObservationFeatures] = None,
) -> Dict[str, Dict[str, PlotOutOfSampleArm]]:
"""Get out-of-sample predictions from a model given a dict of generator runs.
Fixed features input can be used to override fields of the candidate arms
when making model predictions.
Args:
model: The model.
generator_runs_dict: a mapping from generator run name to generator run.
metric_names: metrics to include in the plot.
Returns:
A mapping from name to a mapping from arm name to plot.
"""
out_of_sample_plot: Dict[str, Dict[str, PlotOutOfSampleArm]] = {}
for generator_run_name, generator_run in generator_runs_dict.items():
out_of_sample_plot[generator_run_name] = {}
for arm in generator_run.arms:
# This assumes context is None
obsf = ObservationFeatures.from_arm(arm)
if fixed_features is not None:
obsf.update_features(fixed_features)
# Make a prediction
try:
pred_y, pred_se = _predict_at_point(model, obsf, metric_names)
except Exception:
# Check if it is an out-of-design arm.
if not model.model_space.check_membership(obsf.parameters):
# Skip this point
continue
else:
# It should have worked
raise
arm_name = arm.name_or_short_signature
out_of_sample_plot[generator_run_name][arm_name] = PlotOutOfSampleArm(
name=arm_name,
parameters=obsf.parameters,
y_hat=pred_y,
se_hat=pred_se,
context_stratum=None,
)
return out_of_sample_plot
def get_plot_data(
model: ModelBridge,
generator_runs_dict: Dict[str, GeneratorRun],
metric_names: Optional[Set[str]] = None,
fixed_features: Optional[ObservationFeatures] = None,
) -> Tuple[PlotData, RawData, Dict[str, TParameterization]]:
"""Format data object with metrics for in-sample and out-of-sample
arms.
Calculate both observed and predicted metrics for in-sample arms.
Calculate predicted metrics for out-of-sample arms passed via the
`generator_runs_dict` argument.
In PlotData, in-sample observations are merged with IVW. In RawData, they
are left un-merged and given as a list of dictionaries, one for each
observation and having keys 'arm_name', 'mean', and 'sem'.
Args:
model: The model.
generator_runs_dict: a mapping from generator run name to generator run.
metric_names: Restrict predictions to this set. If None, all metrics
in the model will be returned.
fixed_features: Fixed features to use when making model predictions.
Returns:
A tuple containing
- PlotData object with in-sample and out-of-sample predictions.
- List of observations like::
{'metric_name': 'likes', 'arm_name': '0_1', 'mean': 1., 'sem': 0.1}.
- Mapping from arm name to parameters.
"""
metrics_plot = model.metric_names if metric_names is None else metric_names
in_sample_plot, raw_data, cond_name_to_parameters = _get_in_sample_arms(
model=model, metric_names=metrics_plot, fixed_features=fixed_features
)
out_of_sample_plot = _get_out_of_sample_arms(
model=model,
generator_runs_dict=generator_runs_dict,
metric_names=metrics_plot,
fixed_features=fixed_features,
)
# pyre-fixme[16]: `Optional` has no attribute `arm_name`.
status_quo_name = None if model.status_quo is None else model.status_quo.arm_name
plot_data = PlotData(
metrics=list(metrics_plot),
in_sample=in_sample_plot,
out_of_sample=out_of_sample_plot,
status_quo_name=status_quo_name,
)
return plot_data, raw_data, cond_name_to_parameters
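# Illustrative usage sketch (not part of the original module), assuming a fitted
# ModelBridge `model` and a candidate GeneratorRun `gr`; the metric name
# "objective" is a placeholder for a metric known to the model:
#   plot_data, raw_data, arm_params = get_plot_data(
#       model, generator_runs_dict={"candidates": gr}, metric_names={"objective"}
#   )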
def get_range_parameter(model: ModelBridge, param_name: str) -> RangeParameter:
"""
Get the range parameter with the given name from the model.
Throws if parameter doesn't exist or is not a range parameter.
Args:
model: The model.
param_name: The name of the RangeParameter to be found.
Returns: The RangeParameter named `param_name`.
"""
range_param = model.model_space.parameters.get(param_name)
if range_param is None:
raise ValueError(f"Parameter `{param_name}` does not exist.")
if not isinstance(range_param, RangeParameter):
raise ValueError(f"{param_name} is not a RangeParameter")
return range_param
def get_range_parameters(model: ModelBridge) -> List[RangeParameter]:
"""
Get a list of range parameters from a model.
Args:
model: The model.
Returns: List of RangeParameters.
"""
return [
parameter
for parameter in model.model_space.parameters.values()
if isinstance(parameter, RangeParameter)
]
def get_grid_for_parameter(parameter: RangeParameter, density: int) -> np.ndarray:
"""Get a grid of points along the range of the parameter.
Will be a log-scale grid if parameter is log scale.
Args:
parameter: Parameter for which to generate grid.
density: Number of points in the grid.
"""
is_log = parameter.log_scale
if is_log:
grid = np.linspace(
np.log10(parameter.lower), np.log10(parameter.upper), density
)
grid = 10 ** grid
else:
grid = np.linspace(parameter.lower, parameter.upper, density)
return grid
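# Example (illustrative): for a log-scale RangeParameter with lower=1e-4 and
# upper=1e-1, get_grid_for_parameter(parameter, density=4) returns points at
# 1e-4, 1e-3, 1e-2 and 1e-1; a linear-scale parameter gets an evenly spaced grid.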
def get_fixed_values(
model: ModelBridge, slice_values: Optional[Dict[str, Any]] = None
) -> TParameterization:
"""Get fixed values for parameters in a slice plot.
If there is an in-design status quo, those values will be used. Otherwise,
the mean of RangeParameters or the mode of ChoiceParameters is used.
Any value in slice_values will override the above.
Args:
model: ModelBridge being used for plotting
slice_values: Map from parameter name to value at which is should be
fixed.
Returns: Map from parameter name to fixed value.
"""
# Check if status_quo is in design
if model.status_quo is not None and model.model_space.check_membership(
# pyre-fixme[16]: `Optional` has no attribute `features`.
model.status_quo.features.parameters
):
setx = model.status_quo.features.parameters
else:
observations = model.get_training_data()
setx = {}
for p_name, parameter in model.model_space.parameters.items():
# Exclude out of design status quo (no parameters)
vals = [
obs.features.parameters[p_name]
for obs in observations
if (
len(obs.features.parameters) > 0
and parameter.validate(obs.features.parameters[p_name])
)
]
if isinstance(parameter, FixedParameter):
setx[p_name] = parameter.value
elif isinstance(parameter, ChoiceParameter):
setx[p_name] = Counter(vals).most_common(1)[0][0]
elif isinstance(parameter, RangeParameter):
setx[p_name] = parameter._cast(np.mean(vals))
if slice_values is not None:
# slice_values has type Dictionary[str, Any]
setx.update(slice_values)
return setx
# Utility methods ported from JS
def contour_config_to_trace(config):
# Load from config
arm_data = config["arm_data"]
density = config["density"]
grid_x = config["grid_x"]
grid_y = config["grid_y"]
f = config["f"]
lower_is_better = config["lower_is_better"]
metric = config["metric"]
rel = config["rel"]
sd = config["sd"]
xvar = config["xvar"]
yvar = config["yvar"]
green_scale = config["green_scale"]
green_pink_scale = config["green_pink_scale"]
blue_scale = config["blue_scale"]
# format data
res = relativize_data(f, sd, rel, arm_data, metric)
f_final = res[0]
sd_final = res[1]
# calculate max of abs(outcome), used for colorscale
f_absmax = max(abs(min(f_final)), max(f_final))
# transform to nested array
f_plt = []
for ind in range(0, len(f_final), density):
f_plt.append(f_final[ind : ind + density])
sd_plt = []
for ind in range(0, len(sd_final), density):
sd_plt.append(sd_final[ind : ind + density])
CONTOUR_CONFIG = {
"autocolorscale": False,
"autocontour": True,
"contours": {"coloring": "heatmap"},
"hoverinfo": "x+y+z",
"ncontours": density / 2,
"type": "contour",
"x": grid_x,
"y": grid_y,
}
if rel:
        # list() so that len() and enumerate() below work on the reversed scale.
        f_scale = list(reversed(green_pink_scale)) if lower_is_better else green_pink_scale
else:
f_scale = green_scale
f_trace = {
"colorbar": {
"x": 0.45,
"y": 0.5,
"ticksuffix": "%" if rel else "",
"tickfont": {"size": 8},
},
"colorscale": [(i / (len(f_scale) - 1), rgb(v)) for i, v in enumerate(f_scale)],
"xaxis": "x",
"yaxis": "y",
"z": f_plt,
# zmax and zmin are ignored if zauto is true
"zauto": not rel,
"zmax": f_absmax,
"zmin": -f_absmax,
}
sd_trace = {
"colorbar": {
"x": 1,
"y": 0.5,
"ticksuffix": "%" if rel else "",
"tickfont": {"size": 8},
},
"colorscale": [
(i / (len(blue_scale) - 1), rgb(v)) for i, v in enumerate(blue_scale)
],
"xaxis": "x2",
"yaxis": "y2",
"z": sd_plt,
}
f_trace.update(CONTOUR_CONFIG)
sd_trace.update(CONTOUR_CONFIG)
# get in-sample arms
arm_text = list(arm_data["in_sample"].keys())
arm_x = [
arm_data["in_sample"][arm_name]["parameters"][xvar] for arm_name in arm_text
]
arm_y = [
arm_data["in_sample"][arm_name]["parameters"][yvar] for arm_name in arm_text
]
# configs for in-sample arms
base_in_sample_arm_config = {
"hoverinfo": "text",
"legendgroup": "In-sample",
"marker": {"color": "black", "symbol": 1, "opacity": 0.5},
"mode": "markers",
"name": "In-sample",
"text": arm_text,
"type": "scatter",
"x": arm_x,
"y": arm_y,
}
f_in_sample_arm_trace = {"xaxis": "x", "yaxis": "y"}
sd_in_sample_arm_trace = {"showlegend": False, "xaxis": "x2", "yaxis": "y2"}
f_in_sample_arm_trace.update(base_in_sample_arm_config)
sd_in_sample_arm_trace.update(base_in_sample_arm_config)
traces = [f_trace, sd_trace, f_in_sample_arm_trace, sd_in_sample_arm_trace]
# iterate over out-of-sample arms
for i, generator_run_name in enumerate(arm_data["out_of_sample"].keys()):
symbol = i + 2 # symbols starts from 2 for candidate markers
ax = []
ay = []
atext = []
for arm_name in arm_data["out_of_sample"][generator_run_name].keys():
ax.append(
arm_data["out_of_sample"][generator_run_name][arm_name]["parameters"][
xvar
]
)
ay.append(
arm_data["out_of_sample"][generator_run_name][arm_name]["parameters"][
yvar
]
)
atext.append("<em>Candidate " + arm_name + "</em>")
traces.append(
{
"hoverinfo": "text",
"legendgroup": generator_run_name,
"marker": {"color": "black", "symbol": symbol, "opacity": 0.5},
"mode": "markers",
"name": generator_run_name,
"text": atext,
"type": "scatter",
"xaxis": "x",
"x": ax,
"yaxis": "y",
"y": ay,
}
)
traces.append(
{
"hoverinfo": "text",
"legendgroup": generator_run_name,
"marker": {"color": "black", "symbol": symbol, "opacity": 0.5},
"mode": "markers",
"name": "In-sample",
"showlegend": False,
"text": atext,
"type": "scatter",
"x": ax,
"xaxis": "x2",
"y": ay,
"yaxis": "y2",
}
)
return traces
def axis_range(grid: List[float], is_log: bool) -> List[float]:
if is_log:
return [math.log10(min(grid)), math.log10(max(grid))]
else:
return [min(grid), max(grid)]
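# Note on _relativize below: it uses a Taylor-expansion (delta-method)
# approximation of the relative change (m_t - m_c) / |m_c|, including a
# second-order bias-correction term, and returns [relative effect, its
# standard error]; relativize_data() scales both by 100 to report percentages.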
def _relativize(m_t: float, sem_t: float, m_c: float, sem_c: float) -> List[float]:
r_hat = (m_t - m_c) / abs(m_c) - sem_c ** 2 * m_t / abs(m_c) ** 3
variance = (sem_t ** 2 + (m_t / m_c * sem_c) ** 2) / m_c ** 2
return [r_hat, math.sqrt(variance)]
def relativize_data(
f: List[float], sd: List[float], rel: bool, arm_data: Dict[Any, Any], metric: str
) -> List[List[float]]:
# if relative, extract status quo & compute ratio
f_final = [] if rel else f
sd_final = [] if rel else sd
if rel:
f_sq = arm_data["in_sample"][arm_data["status_quo_name"]]["y"][metric]
sd_sq = arm_data["in_sample"][arm_data["status_quo_name"]]["se"][metric]
for i in range(len(f)):
res = _relativize(f[i], sd[i], f_sq, sd_sq)
f_final.append(100 * res[0])
sd_final.append(100 * res[1])
return [f_final, sd_final]
def rgb(arr: List[int]) -> str:
return "rgb({},{},{})".format(*arr)
| 33.209231 | 88 | 0.608589 |
7944c5df5487b685f4ac3f2fb5bf068f86b74756 | 26,711 | py | Python | tensor2tensor/utils/decoding.py | JosvanderWesthuizen/tensor2tensor | 342e214dea360a7f472fc82f3dd0775d7e224c52 | ["Apache-2.0"] | 1 | 2018-07-04T02:28:37.000Z | 2018-07-04T02:28:37.000Z | tensor2tensor/utils/decoding.py | zhaopufeng/tensor2tensor | 7bb67a18e1e4a0cddd1d61c65c937f14c1c124e3 | ["Apache-2.0"] | null | null | null | tensor2tensor/utils/decoding.py | zhaopufeng/tensor2tensor | 7bb67a18e1e4a0cddd1d61c65c937f14c1c124e3 | ["Apache-2.0"] | 1 | 2019-04-12T10:53:17.000Z | 2019-04-12T10:53:17.000Z |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoding utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import operator
import os
import time
import numpy as np
import six
from six.moves import input # pylint: disable=redefined-builtin
from tensor2tensor.data_generators import problem as problem_lib
from tensor2tensor.data_generators import text_encoder
import tensorflow as tf
FLAGS = tf.flags.FLAGS
# Number of samples to draw for an image input (in such cases as captioning)
IMAGE_DECODE_LENGTH = 100
def decode_hparams(overrides=""):
"""Hyperparameters for decoding."""
hp = tf.contrib.training.HParams(
save_images=False,
log_results=True,
extra_length=100,
batch_size=0,
beam_size=4,
alpha=0.6,
return_beams=False,
write_beam_scores=False,
max_input_size=-1,
identity_output=False,
num_samples=-1,
delimiter="\n",
decode_to_file=None,
shards=1,
shard_id=0,
num_decodes=1,
force_decode_length=False)
hp.parse(overrides)
return hp
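# Illustrative override string (hypothetical values):
#   decode_hparams("beam_size=1,alpha=0.0,batch_size=16")
# returns the HParams above with those three fields replaced and every other
# default left untouched.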
def log_decode_results(inputs,
outputs,
problem_name,
prediction_idx,
inputs_vocab,
targets_vocab,
targets=None,
save_images=False,
output_dir=None,
identity_output=False,
log_results=True):
"""Log inference results."""
# TODO(lukaszkaiser) refactor this into feature_encoder
is_video = "video" in problem_name or "gym" in problem_name
if is_video:
def fix_and_save_video(vid, prefix):
save_path_template = os.path.join(
output_dir,
"%s_%s_%05d_{:05d}.png" % (problem_name, prefix, prediction_idx))
# this is only required for predictions
if vid.shape[-1] == 1:
vid = np.squeeze(vid, axis=-1)
save_video(vid, save_path_template)
tf.logging.info("Saving video: {}".format(prediction_idx))
fix_and_save_video(inputs, "inputs")
fix_and_save_video(outputs, "outputs")
fix_and_save_video(targets, "targets")
is_image = "image" in problem_name
decoded_inputs = None
if is_image and save_images:
save_path = os.path.join(
output_dir, "%s_prediction_%d.jpg" % (problem_name, prediction_idx))
show_and_save_image(inputs / 255., save_path)
elif inputs_vocab:
if identity_output:
decoded_inputs = " ".join(map(str, inputs.flatten()))
else:
decoded_inputs = inputs_vocab.decode(_save_until_eos(inputs, is_image))
if log_results and not is_video:
tf.logging.info("Inference results INPUT: %s" % decoded_inputs)
decoded_targets = None
decoded_outputs = None
if identity_output:
decoded_outputs = " ".join(map(str, outputs.flatten()))
if targets is not None:
decoded_targets = " ".join(map(str, targets.flatten()))
else:
decoded_outputs = targets_vocab.decode(_save_until_eos(outputs, is_image))
if targets is not None and log_results:
decoded_targets = targets_vocab.decode(_save_until_eos(targets, is_image))
if not is_video:
tf.logging.info("Inference results OUTPUT: %s" % decoded_outputs)
if targets is not None and log_results and not is_video:
tf.logging.info("Inference results TARGET: %s" % decoded_targets)
return decoded_inputs, decoded_outputs, decoded_targets
def decode_from_dataset(estimator,
problem_name,
hparams,
decode_hp,
decode_to_file=None,
dataset_split=None):
"""Perform decoding from dataset."""
tf.logging.info("Performing local inference from dataset for %s.",
str(problem_name))
# We assume that worker_id corresponds to shard number.
shard = decode_hp.shard_id if decode_hp.shards > 1 else None
# Setup decode output directory for any artifacts that may be written out
output_dir = os.path.join(estimator.model_dir, "decode")
tf.gfile.MakeDirs(output_dir)
# If decode_hp.batch_size is specified, use a fixed batch size
if decode_hp.batch_size:
hparams.batch_size = decode_hp.batch_size
hparams.use_fixed_batch_size = True
dataset_kwargs = {
"shard": shard,
"dataset_split": dataset_split,
"max_records": decode_hp.num_samples
}
# Build the inference input function
problem = hparams.problem
infer_input_fn = problem.make_estimator_input_fn(
tf.estimator.ModeKeys.PREDICT, hparams, dataset_kwargs=dataset_kwargs)
output_dirs = []
for decode_id in range(decode_hp.num_decodes):
tf.logging.info("Decoding {}".format(decode_id))
output_dir = os.path.join(estimator.model_dir, "decode_%05d" % decode_id)
tf.gfile.MakeDirs(output_dir)
output_dirs.append(output_dir)
decode_once(estimator,
problem_name,
hparams,
infer_input_fn,
decode_hp,
decode_to_file,
output_dir)
run_postdecode_hooks(DecodeHookArgs(
estimator=estimator,
problem=problem,
output_dirs=output_dirs,
hparams=hparams,
decode_hparams=decode_hp))
def decode_once(estimator,
problem_name,
hparams,
infer_input_fn,
decode_hp,
decode_to_file,
output_dir):
"""Decodes once."""
# Get the predictions as an iterable
predictions = estimator.predict(infer_input_fn)
# Prepare output file writers if decode_to_file passed
decode_to_file = decode_to_file or decode_hp.decode_to_file
if decode_to_file:
if decode_hp.shards > 1:
decode_filename = decode_to_file + ("%.2d" % decode_hp.shard_id)
else:
decode_filename = decode_to_file
output_filepath = _decode_filename(decode_filename, problem_name, decode_hp)
parts = output_filepath.split(".")
parts[-1] = "targets"
target_filepath = ".".join(parts)
parts[-1] = "inputs"
input_filepath = ".".join(parts)
output_file = tf.gfile.Open(output_filepath, "w")
target_file = tf.gfile.Open(target_filepath, "w")
input_file = tf.gfile.Open(input_filepath, "w")
problem_hparams = hparams.problem_hparams
# Inputs vocabulary is set to targets if there are no inputs in the problem,
# e.g., for language models where the inputs are just a prefix of targets.
has_input = "inputs" in problem_hparams.vocabulary
inputs_vocab_key = "inputs" if has_input else "targets"
inputs_vocab = problem_hparams.vocabulary[inputs_vocab_key]
targets_vocab = problem_hparams.vocabulary["targets"]
for num_predictions, prediction in enumerate(predictions):
num_predictions += 1
inputs = prediction["inputs"]
targets = prediction["targets"]
outputs = prediction["outputs"]
# Log predictions
decoded_outputs = []
decoded_scores = []
if decode_hp.return_beams:
output_beams = np.split(outputs, decode_hp.beam_size, axis=0)
scores = None
if "scores" in prediction:
scores = np.split(prediction["scores"], decode_hp.beam_size, axis=0)
for i, beam in enumerate(output_beams):
tf.logging.info("BEAM %d:" % i)
score = scores and scores[i]
decoded = log_decode_results(
inputs,
beam,
problem_name,
num_predictions,
inputs_vocab,
targets_vocab,
save_images=decode_hp.save_images,
output_dir=output_dir,
identity_output=decode_hp.identity_output,
targets=targets,
log_results=decode_hp.log_results)
decoded_outputs.append(decoded)
if decode_hp.write_beam_scores:
decoded_scores.append(score)
else:
decoded = log_decode_results(
inputs,
outputs,
problem_name,
num_predictions,
inputs_vocab,
targets_vocab,
save_images=decode_hp.save_images,
output_dir=output_dir,
identity_output=decode_hp.identity_output,
targets=targets,
log_results=decode_hp.log_results)
decoded_outputs.append(decoded)
# Write out predictions if decode_to_file passed
if decode_to_file:
for i, (d_input, d_output, d_target) in enumerate(decoded_outputs):
beam_score_str = ""
if decode_hp.write_beam_scores:
beam_score_str = "\t%.2f" % decoded_scores[i]
output_file.write(str(d_output) + beam_score_str + decode_hp.delimiter)
target_file.write(str(d_target) + decode_hp.delimiter)
input_file.write(str(d_input) + decode_hp.delimiter)
if (decode_hp.num_samples >= 0 and
num_predictions >= decode_hp.num_samples):
break
if decode_to_file:
output_file.close()
target_file.close()
input_file.close()
def decode_from_file(estimator,
filename,
hparams,
decode_hp,
decode_to_file=None,
checkpoint_path=None):
"""Compute predictions on entries in filename and write them out."""
if not decode_hp.batch_size:
decode_hp.batch_size = 32
tf.logging.info(
"decode_hp.batch_size not specified; default=%d" % decode_hp.batch_size)
# Inputs vocabulary is set to targets if there are no inputs in the problem,
# e.g., for language models where the inputs are just a prefix of targets.
p_hp = hparams.problem_hparams
has_input = "inputs" in p_hp.vocabulary
inputs_vocab_key = "inputs" if has_input else "targets"
inputs_vocab = p_hp.vocabulary[inputs_vocab_key]
targets_vocab = p_hp.vocabulary["targets"]
problem_name = FLAGS.problem
tf.logging.info("Performing decoding from a file.")
sorted_inputs, sorted_keys = _get_sorted_inputs(filename, decode_hp.shards,
decode_hp.delimiter)
num_decode_batches = (len(sorted_inputs) - 1) // decode_hp.batch_size + 1
def input_fn():
input_gen = _decode_batch_input_fn(num_decode_batches, sorted_inputs,
inputs_vocab, decode_hp.batch_size,
decode_hp.max_input_size)
gen_fn = make_input_fn_from_generator(input_gen)
example = gen_fn()
return _decode_input_tensor_to_features_dict(example, hparams)
decodes = []
result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)
start_time = time.time()
total_time_per_step = 0
total_cnt = 0
def timer(gen):
while True:
try:
start_time = time.time()
item = next(gen)
elapsed_time = time.time() - start_time
yield elapsed_time, item
except StopIteration:
break
for elapsed_time, result in timer(result_iter):
if decode_hp.return_beams:
beam_decodes = []
beam_scores = []
output_beams = np.split(result["outputs"], decode_hp.beam_size, axis=0)
scores = None
if "scores" in result:
scores = np.split(result["scores"], decode_hp.beam_size, axis=0)
for k, beam in enumerate(output_beams):
tf.logging.info("BEAM %d:" % k)
score = scores and scores[k]
_, decoded_outputs, _ = log_decode_results(
result["inputs"],
beam,
problem_name,
None,
inputs_vocab,
targets_vocab,
log_results=decode_hp.log_results)
beam_decodes.append(decoded_outputs)
if decode_hp.write_beam_scores:
beam_scores.append(score)
if decode_hp.write_beam_scores:
decodes.append("\t".join([
"\t".join([d, "%.2f" % s])
for d, s in zip(beam_decodes, beam_scores)
]))
else:
decodes.append("\t".join(beam_decodes))
else:
_, decoded_outputs, _ = log_decode_results(
result["inputs"],
result["outputs"],
problem_name,
None,
inputs_vocab,
targets_vocab,
log_results=decode_hp.log_results)
decodes.append(decoded_outputs)
total_time_per_step += elapsed_time
total_cnt += result["outputs"].shape[-1]
tf.logging.info("Elapsed Time: %5.5f" % (time.time() - start_time))
tf.logging.info("Averaged Single Token Generation Time: %5.7f" %
(total_time_per_step / total_cnt))
# Reversing the decoded inputs and outputs because they were reversed in
# _decode_batch_input_fn
sorted_inputs.reverse()
decodes.reverse()
# If decode_to_file was provided use it as the output filename without change
# (except for adding shard_id if using more shards for decoding).
# Otherwise, use the input filename plus model, hp, problem, beam, alpha.
decode_filename = decode_to_file if decode_to_file else filename
if decode_hp.shards > 1:
decode_filename += "%.2d" % decode_hp.shard_id
if not decode_to_file:
decode_filename = _decode_filename(decode_filename, problem_name, decode_hp)
tf.logging.info("Writing decodes into %s" % decode_filename)
outfile = tf.gfile.Open(decode_filename, "w")
for index in range(len(sorted_inputs)):
outfile.write("%s%s" % (decodes[sorted_keys[index]], decode_hp.delimiter))
def _decode_filename(base_filename, problem_name, decode_hp):
return "{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes".format(
base=base_filename,
model=FLAGS.model,
hp=FLAGS.hparams_set,
problem=problem_name,
beam=str(decode_hp.beam_size),
alpha=str(decode_hp.alpha))
def make_input_fn_from_generator(gen):
"""Use py_func to yield elements from the given generator."""
first_ex = six.next(gen)
flattened = tf.contrib.framework.nest.flatten(first_ex)
types = [t.dtype for t in flattened]
shapes = [[None] * len(t.shape) for t in flattened]
first_ex_list = [first_ex]
def py_func():
if first_ex_list:
example = first_ex_list.pop()
else:
example = six.next(gen)
return tf.contrib.framework.nest.flatten(example)
def input_fn():
flat_example = tf.py_func(py_func, [], types)
_ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]
example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example)
return example
return input_fn
def decode_interactively(estimator, hparams, decode_hp, checkpoint_path=None):
"""Interactive decoding."""
def input_fn():
gen_fn = make_input_fn_from_generator(
_interactive_input_fn(hparams, decode_hp))
example = gen_fn()
example = _interactive_input_tensor_to_features_dict(example, hparams)
return example
result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)
for result in result_iter:
is_image = False # TODO(lukaszkaiser): find out from problem id / class.
targets_vocab = hparams.problem_hparams.vocabulary["targets"]
if decode_hp.return_beams:
beams = np.split(result["outputs"], decode_hp.beam_size, axis=0)
scores = None
if "scores" in result:
scores = np.split(result["scores"], decode_hp.beam_size, axis=0)
for k, beam in enumerate(beams):
tf.logging.info("BEAM %d:" % k)
beam_string = targets_vocab.decode(_save_until_eos(beam, is_image))
if scores is not None:
tf.logging.info("\"%s\"\tScore:%f" % (beam_string, scores[k]))
else:
tf.logging.info("\"%s\"" % beam_string)
else:
if decode_hp.identity_output:
tf.logging.info(" ".join(map(str, result["outputs"].flatten())))
else:
tf.logging.info(
targets_vocab.decode(_save_until_eos(result["outputs"], is_image)))
def _decode_batch_input_fn(num_decode_batches, sorted_inputs, vocabulary,
batch_size, max_input_size):
"""Generator to produce batches of inputs."""
tf.logging.info(" batch %d" % num_decode_batches)
# First reverse all the input sentences so that if you're going to get OOMs,
# you'll see it in the first batch
sorted_inputs.reverse()
for b in range(num_decode_batches):
tf.logging.info("Decoding batch %d" % b)
batch_length = 0
batch_inputs = []
for inputs in sorted_inputs[b * batch_size:(b + 1) * batch_size]:
input_ids = vocabulary.encode(inputs)
if max_input_size > 0:
# Subtract 1 for the EOS_ID.
input_ids = input_ids[:max_input_size - 1]
input_ids.append(text_encoder.EOS_ID)
batch_inputs.append(input_ids)
if len(input_ids) > batch_length:
batch_length = len(input_ids)
final_batch_inputs = []
for input_ids in batch_inputs:
assert len(input_ids) <= batch_length
x = input_ids + [0] * (batch_length - len(input_ids))
final_batch_inputs.append(x)
yield {
"inputs": np.array(final_batch_inputs).astype(np.int32),
}
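    # Example of the padding above (with made-up token ids): inputs encoded to
    # lengths 3 and 5 in the same batch become rows like [7, 8, 1, 0, 0] and
    # [3, 4, 5, 6, 1], i.e. every row is right-padded with zeros to the longest
    # length in the batch.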
def _interactive_input_fn(hparams, decode_hp):
"""Generator that reads from the terminal and yields "interactive inputs".
Due to temporary limitations in tf.learn, if we don't want to reload the
whole graph, then we are stuck encoding all of the input as one fixed-size
numpy array.
We yield int32 arrays with shape [const_array_size]. The format is:
[num_samples, decode_length, len(input ids), <input ids>, <padding>]
Args:
hparams: model hparams
decode_hp: decode hparams
Yields:
numpy arrays
Raises:
Exception: when `input_type` is invalid.
"""
num_samples = decode_hp.num_samples if decode_hp.num_samples > 0 else 1
decode_length = decode_hp.extra_length
input_type = "text"
p_hparams = hparams.problem_hparams
has_input = "inputs" in p_hparams.input_modality
vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"]
# This should be longer than the longest input.
const_array_size = 10000
# Import readline if available for command line editing and recall.
try:
import readline # pylint: disable=g-import-not-at-top,unused-variable
except ImportError:
pass
while True:
prompt = ("INTERACTIVE MODE num_samples=%d decode_length=%d \n"
" it=<input_type> ('text' or 'image' or 'label', default: "
"text)\n"
" ns=<num_samples> (changes number of samples, default: 1)\n"
" dl=<decode_length> (changes decode length, default: 100)\n"
" <%s> (decode)\n"
" q (quit)\n"
">" % (num_samples, decode_length, "source_string"
if has_input else "target_prefix"))
input_string = input(prompt)
if input_string == "q":
return
elif input_string[:3] == "ns=":
num_samples = int(input_string[3:])
elif input_string[:3] == "dl=":
decode_length = int(input_string[3:])
elif input_string[:3] == "it=":
input_type = input_string[3:]
else:
if input_type == "text":
input_ids = vocabulary.encode(input_string)
if has_input:
input_ids.append(text_encoder.EOS_ID)
x = [num_samples, decode_length, len(input_ids)] + input_ids
assert len(x) < const_array_size
x += [0] * (const_array_size - len(x))
features = {
"inputs": np.array(x).astype(np.int32),
}
elif input_type == "image":
input_path = input_string
img = vocabulary.encode(input_path)
features = {
"inputs": img.astype(np.int32),
}
elif input_type == "label":
input_ids = [int(input_string)]
x = [num_samples, decode_length, len(input_ids)] + input_ids
features = {
"inputs": np.array(x).astype(np.int32),
}
else:
raise Exception("Unsupported input type.")
for k, v in six.iteritems(
problem_lib.problem_hparams_to_features(p_hparams)):
features[k] = np.array(v).astype(np.int32)
yield features
def save_video(video, save_path_template):
"""Save frames of the videos into files."""
try:
from PIL import Image # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Showing and saving an image requires PIL library to be "
"installed: %s", e)
raise NotImplementedError("Image display and save not implemented.")
for i, frame in enumerate(video):
save_path = save_path_template.format(i)
with tf.gfile.Open(save_path, "wb") as sp:
Image.fromarray(np.uint8(frame)).save(sp)
def show_and_save_image(img, save_path):
"""Shows an image using matplotlib and saves it."""
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Showing and saving an image requires matplotlib to be "
"installed: %s", e)
raise NotImplementedError("Image display and save not implemented.")
plt.imshow(img)
with tf.gfile.Open(save_path, "wb") as sp:
plt.savefig(sp)
def _get_sorted_inputs(filename, num_shards=1, delimiter="\n"):
"""Returning inputs sorted according to length.
Args:
filename: path to file with inputs, 1 per line.
num_shards: number of input shards. If > 1, will read from file filename.XX,
where XX is FLAGS.worker_id.
delimiter: str, delimits records in the file.
Returns:
a sorted list of inputs
"""
tf.logging.info("Getting sorted inputs")
  # Read the file and sort the inputs according to their length.
if num_shards > 1:
decode_filename = filename + ("%.2d" % FLAGS.worker_id)
else:
decode_filename = filename
with tf.gfile.Open(decode_filename) as f:
text = f.read()
records = text.split(delimiter)
inputs = [record.strip() for record in records]
# Strip the last empty line.
if not inputs[-1]:
inputs.pop()
input_lens = [(i, len(line.split())) for i, line in enumerate(inputs)]
sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1))
# We'll need the keys to rearrange the inputs back into their original order
sorted_keys = {}
sorted_inputs = []
for i, (index, _) in enumerate(sorted_input_lens):
sorted_inputs.append(inputs[index])
sorted_keys[index] = i
return sorted_inputs, sorted_keys
def _save_until_eos(hyp, is_image):
"""Strips everything after the first <EOS> token, which is normally 1."""
hyp = hyp.flatten()
if is_image:
return hyp
try:
index = list(hyp).index(text_encoder.EOS_ID)
return hyp[0:index]
except ValueError:
# No EOS_ID: return the array as-is.
return hyp
def _interactive_input_tensor_to_features_dict(feature_map, hparams):
"""Convert the interactive input format (see above) to a dictionary.
Args:
feature_map: dict with inputs.
hparams: model hyperparameters
Returns:
a features dictionary, as expected by the decoder.
"""
inputs = tf.convert_to_tensor(feature_map["inputs"])
input_is_image = False if len(inputs.get_shape()) < 3 else True
x = inputs
if input_is_image:
x = tf.image.resize_images(x, [299, 299])
x = tf.reshape(x, [1, 299, 299, -1])
x = tf.to_int32(x)
else:
# Remove the batch dimension.
num_samples = x[0]
length = x[2]
x = tf.slice(x, [3], tf.to_int32([length]))
x = tf.reshape(x, [1, -1, 1, 1])
# Transform into a batch of size num_samples to get that many random
# decodes.
x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1]))
p_hparams = hparams.problem_hparams
input_space_id = tf.constant(p_hparams.input_space_id)
target_space_id = tf.constant(p_hparams.target_space_id)
features = {}
features["input_space_id"] = input_space_id
features["target_space_id"] = target_space_id
features["decode_length"] = (
IMAGE_DECODE_LENGTH if input_is_image else inputs[1])
features["inputs"] = x
return features
def _decode_input_tensor_to_features_dict(feature_map, hparams):
"""Convert the interactive input format (see above) to a dictionary.
Args:
feature_map: dict with inputs.
hparams: model hyperparameters
Returns:
a features dictionary, as expected by the decoder.
"""
inputs = tf.convert_to_tensor(feature_map["inputs"])
input_is_image = False
x = inputs
p_hparams = hparams.problem_hparams
# Add a third empty dimension
x = tf.expand_dims(x, axis=[2])
x = tf.to_int32(x)
input_space_id = tf.constant(p_hparams.input_space_id)
target_space_id = tf.constant(p_hparams.target_space_id)
features = {}
features["input_space_id"] = input_space_id
features["target_space_id"] = target_space_id
features["decode_length"] = (
IMAGE_DECODE_LENGTH if input_is_image else tf.shape(x)[1] + 50)
features["inputs"] = x
return features
def latest_checkpoint_step(ckpt_dir):
ckpt = tf.train.get_checkpoint_state(ckpt_dir)
if not ckpt:
return None
path = ckpt.model_checkpoint_path
step = int(path.split("-")[-1])
return step
class DecodeHookArgs(collections.namedtuple(
"DecodeHookArgs",
["estimator", "problem", "output_dirs", "hparams", "decode_hparams"])):
pass
def run_postdecode_hooks(decode_hook_args):
"""Run hooks after decodes have run."""
hooks = decode_hook_args.problem.decode_hooks
if not hooks:
return
global_step = latest_checkpoint_step(decode_hook_args.estimator.model_dir)
if global_step is None:
tf.logging.info(
"Skipping decode hooks because no checkpoint yet available.")
return
tf.logging.info("Running decode hooks.")
parent_dir = os.path.join(decode_hook_args.output_dirs[0], os.pardir)
final_dir = os.path.join(parent_dir, "decode")
summary_writer = tf.summary.FileWriter(final_dir)
for hook in hooks:
# Isolate each hook in case it creates TF ops
with tf.Graph().as_default():
summaries = hook(decode_hook_args)
if summaries:
summary = tf.Summary(value=list(summaries))
summary_writer.add_summary(summary, global_step)
summary_writer.close()
tf.logging.info("Decode hooks done.")
| 34.68961 | 80 | 0.670398 |
7944c5f358e43c55e2950ce20372d1bd70b31729 | 6,723 | py | Python | noxfile.py | SciTools-incubator/iris-ugrid | ce035e6de456b5ab9df5daa9d164c59baddd7552 | ["BSD-3-Clause"] | 4 | 2020-07-28T09:27:26.000Z | 2021-07-06T10:16:54.000Z | noxfile.py | SciTools-incubator/iris-ugrid | ce035e6de456b5ab9df5daa9d164c59baddd7552 | ["BSD-3-Clause"] | 30 | 2020-07-27T16:34:25.000Z | 2021-07-07T11:17:17.000Z | noxfile.py | SciTools-incubator/iris-ugrid | ce035e6de456b5ab9df5daa9d164c59baddd7552 | ["BSD-3-Clause"] | 6 | 2020-07-28T09:27:47.000Z | 2020-11-04T09:43:49.000Z |
"""
Perform test automation with nox.
For further details, see https://nox.thea.codes/en/stable/#
"""
from hashlib import sha256
import os
from pathlib import Path
from shutil import rmtree
from urllib.request import urlretrieve
from zipfile import ZipFile
import nox
#: Default to reusing any pre-existing nox environments.
nox.options.reuse_existing_virtualenvs = True
#: Name of the package to test.
PACKAGE = Path("iris_ugrid").absolute()
#: Cirrus-CI environment variable hook.
PY_VER = os.environ.get("PY_VER", "3.7")
# Git commit of iris that iris-ugrid depends on.
with Path("requirements").joinpath("manual", "iris_commit.txt").open() as fi:
IRIS_COMMIT = fi.read().strip()
def venv_cached(session, cache_info_path, env_spec_path, iris_commit):
"""
Determine whether the nox session environment has been cached.
Parameters
----------
session: object
A `nox.sessions.Session` object.
cache_info_path: Path
A Path object pointing to the expected directory that would contain
cache info.
env_spec_path: pathlib.Path
A Path object pointing to the conda env spec YAML for Iris-ugrid.
iris_commit : str
The string for the Iris commit Iris-ugrid is dependent on.
Returns
-------
bool
Whether the session has been cached.
"""
result = False
cache_env_spec = cache_info_path / env_spec_path.name
cache_iris_commit = cache_info_path / "iris-commit"
caches_found = all(
[file.is_file() for file in (cache_env_spec, cache_iris_commit)]
)
if caches_found:
with env_spec_path.open("rb") as fi:
expected = sha256(fi.read()).hexdigest()
with cache_env_spec.open("r") as fi:
actual = fi.read()
ok_env_spec = actual == expected
expected = iris_commit
with cache_iris_commit.open("r") as fi:
actual = fi.read()
ok_iris_commit = actual == expected
result = ok_env_spec and ok_iris_commit
return result
def cache_venv(session, cache_info_path, env_spec_path, iris_commit):
"""
Cache the nox session environment.
This consists of saving a hexdigest (sha256) of the associated
conda requirements YAML file.
Parameters
----------
session: object
A `nox.sessions.Session` object.
cache_info_path: pathlib.Path
A Path object denoting the directory that cache info should be written
to.
env_spec_path: pathlib.Path
A Path object pointing to the conda env spec YAML for Iris-ugrid.
iris_commit: str
The string for the Iris commit Iris-ugrid is dependent on.
"""
if not cache_info_path.is_dir():
cache_info_path.mkdir()
with env_spec_path.open("rb") as fi:
hexdigest = sha256(fi.read()).hexdigest()
cache_env_spec = cache_info_path / env_spec_path.name
with cache_env_spec.open("w+") as fo:
fo.write(hexdigest)
cache_iris_commit = cache_info_path / "iris-commit"
with cache_iris_commit.open("w+") as fo:
fo.write(iris_commit)
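# Cache layout used by venv_cached()/cache_venv() above: <venv>/nox_cache_info/
# holds a file named after the env-spec YAML containing its sha256 hexdigest,
# plus an "iris-commit" file containing the pinned Iris commit; both must match
# the current requirements for the environment to be reused.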
@nox.session
def flake8(session):
"""
Perform flake8 linting of iris-ugrid.
Parameters
----------
session: object
A `nox.sessions.Session` object.
"""
# Pip install the session requirements.
session.install("flake8")
# Execute the flake8 linter on the package.
session.run("flake8", str(PACKAGE))
# Execute the flake8 linter on this file.
session.run("flake8", __file__)
@nox.session
def black(session):
"""
Perform black format checking of iris-ugrid.
Parameters
----------
session: object
A `nox.sessions.Session` object.
"""
# Pip install the session requirements.
session.install("black==19.10b0")
# Execute the black format checker on the package.
session.run("black", "--check", str(PACKAGE))
# Execute the black format checker on this file.
session.run("black", "--check", __file__)
@nox.session(python=[PY_VER], venv_backend="conda")
def tests(session):
"""
Perform iris-ugrid tests.
Parameters
----------
session: object
A `nox.sessions.Session` object.
Notes
-----
See
- https://github.com/theacodes/nox/issues/346
- https://github.com/theacodes/nox/issues/260
"""
INSTALL_DIR = Path().cwd().absolute()
env_spec_self = (
INSTALL_DIR
/ "requirements"
/ "ci"
/ f"py{PY_VER.replace('.', '')}.yml"
)
IRIS_DIR = Path(session.virtualenv.location) / "iris"
cache_info_path = Path(session.virtualenv.location) / "nox_cache_info"
if not venv_cached(session, cache_info_path, env_spec_self, IRIS_COMMIT):
def conda_env_update(env_spec_path):
# Back-door approach to force nox to use "conda env update".
command = (
f"conda env update --prefix={session.virtualenv.location} "
f"--file={env_spec_path}"
)
command = command.split(" ")
session._run(*command, silent=True, external="error")
# Download Iris.
github_archive_url = (
f"https://github.com/SciTools/iris/archive/{IRIS_COMMIT}.zip"
)
iris_zip = Path(urlretrieve(github_archive_url, "iris.zip")[0])
with ZipFile(iris_zip, "r") as zip_open:
zip_open.extractall()
if IRIS_DIR.is_dir():
rmtree(IRIS_DIR)
Path(f"iris-{IRIS_COMMIT}").rename(IRIS_DIR)
iris_zip.unlink()
# Install Iris dependencies.
env_spec_iris = (
IRIS_DIR
/ "requirements"
/ "ci"
/ f"py{PY_VER.replace('.', '')}.yml"
)
conda_env_update(env_spec_iris)
# Configure Iris.
site_cfg_content = [
"[Resources]",
f"test_data_dir = {os.environ['IRIS_TEST_DATA_DIR']}/test_data",
f"doc_dir = {IRIS_DIR / 'docs' / 'iris'}",
"[System]",
f"udunits2_path = {session.virtualenv.location}/lib/libudunits2.so",
]
site_cfg_path = IRIS_DIR / "lib" / "iris" / "etc" / "site.cfg"
with site_cfg_path.open("w+") as site_cfg:
site_cfg.writelines(line + "\n" for line in site_cfg_content)
# Install Iris.
os.chdir(IRIS_DIR)
session.run(*"python setup.py install".split(" "), silent=True)
#######################################################################
# Install dependencies.
conda_env_update(env_spec_self)
cache_venv(session, cache_info_path, env_spec_self, IRIS_COMMIT)
session.run("pytest", "-v", str(PACKAGE))
| 27.896266 | 80 | 0.621598 |
7944c69ecd7da4c0664baf5448898b4d574ef833 | 2,224 | py | Python | day4/part1.py | jessecambon/advent_of_code_2021 | 1ace59f402cf956d373200788806bc10a51c23b3 | [
"MIT"
] | null | null | null | day4/part1.py | jessecambon/advent_of_code_2021 | 1ace59f402cf956d373200788806bc10a51c23b3 | [
"MIT"
] | null | null | null | day4/part1.py | jessecambon/advent_of_code_2021 | 1ace59f402cf956d373200788806bc10a51c23b3 | [
"MIT"
] | null | null | null | import numpy as np
bingo_boards = []
bingo_square_txt = []
# Read in bingo squares as 2d numpy arrays in a list
# and bingo numbers as a single 1d numpy array
with open('input.txt') as f:
line_index = 0
for line in f:
if line_index == 0:
bingo_numbers = np.fromstring(line.strip(), sep=',')
line_index += 1
elif line.rstrip() != '':
bingo_square_txt.append(line.strip())
# store bingo square as numpy array
if len(bingo_square_txt) == 5:
bingo_boards.append(np.fromstring(' '.join(bingo_square_txt), sep=' ').reshape(5, 5))
bingo_square_txt = [] # reset bingo square
# Play bingo - reveal one bingo number at a time
bingo_numbers_in_play = np.empty(0) #initialize
bingo = False # has a board won yet?
for i in range(len(bingo_numbers)):
if bingo == True:
break
# select bingo numbers in play for the given round
bingo_numbers_in_play = np.append(bingo_numbers_in_play, bingo_numbers[i])
for board_number, selected_board in enumerate(bingo_boards):
# check rows for bingo
for k in range(5):
row = selected_board[k]
if sum(np.isin(row, bingo_numbers_in_play)) == 5:
print('row:')
print(row)
bingo = True
break
# check columns for bingo
for k in range(5):
column = selected_board[:,k]
if sum(np.isin(column, bingo_numbers_in_play)) == 5:
print('column:')
print(column)
bingo = True
break
if bingo == True:
print('Bingo!')
print('Winning board number: %d' % (board_number+1))
print('Numbers in play:')
print(bingo_numbers_in_play)
unscored_numbers = selected_board.flatten()[np.isin(selected_board.flatten(), bingo_numbers_in_play, invert = True)]
print("unscored sum: %d" % sum(unscored_numbers))
print("last bingo number: %d" % bingo_numbers_in_play[-1])
print('product: %d' % (sum(unscored_numbers) * bingo_numbers_in_play[-1]))
            break
 | 35.301587 | 128 | 0.577338 |
7944c715fa15f1d48361419737106631b2de6113 | 4,033 | py | Python | core/controllers/profile.py | aldeka/oppia | aead304c95a282c9ca8035bc25c4794864d07578 | [
"Apache-2.0"
] | 3 | 2015-01-10T23:45:23.000Z | 2015-02-17T10:46:08.000Z | core/controllers/profile.py | aldeka/oppia | aead304c95a282c9ca8035bc25c4794864d07578 | [
"Apache-2.0"
] | null | null | null | core/controllers/profile.py | aldeka/oppia | aead304c95a282c9ca8035bc25c4794864d07578 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the profile page."""
__author__ = '[email protected] (Stephanie Federwisch)'
from core.controllers import base
from core.domain import config_domain
from core.domain import exp_services
from core.domain import user_services
import feconf
import utils
EDITOR_PREREQUISITES_AGREEMENT = config_domain.ConfigProperty(
'editor_prerequisites_agreement', 'UnicodeString',
'The agreement that editors are asked to accept before making any '
'contributions.',
default_value=feconf.DEFAULT_EDITOR_PREREQUISITES_AGREEMENT
)
class ProfilePage(base.BaseHandler):
"""The profile page."""
PAGE_NAME_FOR_CSRF = 'profile'
@base.require_user
def get(self):
"""Handles GET requests."""
self.values.update({
'nav_mode': feconf.NAV_MODE_PROFILE,
})
self.render_template('profile/profile.html')
class ProfileHandler(base.BaseHandler):
"""Provides data for the profile gallery."""
@base.require_user
def get(self):
"""Handles GET requests."""
viewable_exps = (
exp_services.get_explicit_viewer_explorations_summary_dict(
self.user_id))
editable_exps = (
exp_services.get_explicit_editor_explorations_summary_dict(
self.user_id))
owned_exps = exp_services.get_owned_explorations_summary_dict(
self.user_id)
self.values.update({
'viewable': viewable_exps,
'editable': editable_exps,
'owned': owned_exps
})
self.render_json(self.values)
class EditorPrerequisitesPage(base.BaseHandler):
"""The page which prompts for username and acceptance of terms."""
PAGE_NAME_FOR_CSRF = 'editor_prerequisites_page'
@base.require_user
def get(self):
"""Handles GET requests."""
self.values.update({
'agreement': EDITOR_PREREQUISITES_AGREEMENT.value,
'nav_mode': feconf.NAV_MODE_PROFILE,
})
self.render_template('profile/editor_prerequisites.html')
class EditorPrerequisitesHandler(base.BaseHandler):
"""Provides data for the editor prerequisites page."""
PAGE_NAME_FOR_CSRF = 'editor_prerequisites_page'
@base.require_user
def get(self):
"""Handles GET requests."""
user_settings = user_services.get_user_settings(self.user_id)
self.render_json({
'has_agreed_to_terms': bool(user_settings.last_agreed_to_terms),
'username': user_settings.username,
})
@base.require_user
def post(self):
"""Handles POST requests."""
username = self.payload.get('username')
agreed_to_terms = self.payload.get('agreed_to_terms')
if not isinstance(agreed_to_terms, bool) or not agreed_to_terms:
raise self.InvalidInputException(
'In order to edit explorations on this site, you will '
'need to accept the license terms.')
else:
user_services.record_agreement_to_terms(self.user_id)
if user_services.get_username(self.user_id):
# A username has already been set for this user.
self.render_json({})
return
try:
user_services.set_username(self.user_id, username)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
self.render_json({})
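# Hedged usage sketch (not part of the Oppia codebase): the POST handler above
# expects a payload carrying a username plus an explicit boolean agreement
# flag. The dict below is a hypothetical example of a valid payload only.
_EXAMPLE_PREREQUISITES_PAYLOAD = {
    'username': 'newcontributor',  # hypothetical; must pass set_username validation
    'agreed_to_terms': True,       # anything else raises InvalidInputException
}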
| 32.264 | 76 | 0.679147 |
7944c848fb149de3ad10f8d4120114d464e309c1 | 256 | py | Python | tests/test_task.py | georgepsarakis/lambda-pond | 79d1319345c07e95b4a1c7207b148ab04c68dc3f | [
"MIT"
] | null | null | null | tests/test_task.py | georgepsarakis/lambda-pond | 79d1319345c07e95b4a1c7207b148ab04c68dc3f | [
"MIT"
] | null | null | null | tests/test_task.py | georgepsarakis/lambda-pond | 79d1319345c07e95b4a1c7207b148ab04c68dc3f | [
"MIT"
] | null | null | null | import unittest
from lambda_pond import AsyncTask
class TestAsyncTask(unittest.TestCase):
def test_ready(self):
task = AsyncTask(fn=str)
self.assertFalse(task.ready())
task.fulfil(None)
self.assertTrue(task.ready())
| 19.692308 | 39 | 0.679688 |
7944c864885fc44e369bbd980e1c6f646a1afe0e | 6,915 | py | Python | weather.py | tdragon/fmi-hass-custom | 8f69403d1c055960a956949cb4c0e97d6a473b1b | [
"MIT"
] | 25 | 2020-05-02T17:30:24.000Z | 2022-03-05T21:27:06.000Z | weather.py | tdragon/fmi-hass-custom | 8f69403d1c055960a956949cb4c0e97d6a473b1b | [
"MIT"
] | 27 | 2020-09-19T06:56:58.000Z | 2022-03-20T11:41:03.000Z | weather.py | tdragon/fmi-hass-custom | 8f69403d1c055960a956949cb4c0e97d6a473b1b | [
"MIT"
] | 11 | 2020-04-30T05:00:56.000Z | 2022-01-14T19:22:20.000Z | """Support for retrieving meteorological data from FMI (Finnish Meteorological Institute)."""
from dateutil import tz
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TIME,
ATTR_FORECAST_WIND_BEARING,
ATTR_FORECAST_WIND_SPEED,
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_PRESSURE,
ATTR_FORECAST_TEMP_LOW,
WeatherEntity,
)
from homeassistant.const import CONF_NAME
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import CONF_DAILY_MODE
from .utils import (
get_weather_symbol
)
from .const import (
_LOGGER, ATTRIBUTION, COORDINATOR,
DOMAIN, MANUFACTURER, NAME
)
PARALLEL_UPDATES = 1
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add an FMI weather entity from a config_entry."""
name = config_entry.data[CONF_NAME]
daily_mode = config_entry.options.get(CONF_DAILY_MODE, False)
coordinator = hass.data[DOMAIN][config_entry.entry_id][COORDINATOR]
entity_list = [FMIWeatherEntity(name, coordinator, False)]
if daily_mode:
entity_list.append(FMIWeatherEntity(f"{name} (daily)", coordinator, True))
async_add_entities(entity_list, False)
class FMIWeatherEntity(CoordinatorEntity, WeatherEntity):
"""Define an FMI Weather Entity."""
def __init__(self, name, coordinator, daily_mode):
"""Initialize FMI weather object."""
super().__init__(coordinator)
self._name = name
self._attrs = {}
self._unit_system = "Metric"
self._fmi = coordinator
self._daily_mode = daily_mode
self._id = (
self.coordinator.unique_id
if not daily_mode
else f"{self.coordinator.unique_id}_daily"
)
@property
def name(self):
"""Return the name of the place based on Lat/Long."""
if self._fmi is None or self._fmi.current is None:
return self._name
if self._daily_mode:
return f"{self._fmi.current.place} (daily)"
return self._fmi.current.place
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self._id
@property
def device_info(self):
"""Return the device info."""
return {
"identifiers": {(DOMAIN, self.coordinator.unique_id)},
"name": NAME,
"manufacturer": MANUFACTURER,
"entry_type": "service",
}
@property
def available(self):
"""Return if weather data is available from FMI."""
if self._fmi is None:
return False
return self._fmi.current is not None
@property
def temperature(self):
"""Return the temperature."""
if self._fmi is None:
return None
return self._fmi.current.data.temperature.value
@property
def temperature_unit(self):
"""Return the unit of measurement."""
if self._fmi is None:
return None
return self._fmi.current.data.temperature.unit
@property
def humidity(self):
"""Return the humidity."""
if self._fmi is None:
return None
return self._fmi.current.data.humidity.value
@property
    def precipitation(self):
        """Return the precipitation."""
if self._fmi is None:
return None
return self._fmi.current.data.precipitation_amount.value
@property
def wind_speed(self):
"""Return the wind speed."""
if self._fmi is None:
return None
return round(
self._fmi.current.data.wind_speed.value * 3.6, 1
) # Convert m/s to km/hr
@property
def wind_bearing(self):
"""Return the wind bearing."""
if self._fmi is None:
return None
return self._fmi.current.data.wind_direction.value
@property
def pressure(self):
"""Return the pressure."""
if self._fmi is None:
return None
return self._fmi.current.data.pressure.value
@property
def condition(self):
"""Return the condition."""
if self._fmi is None:
return None
return get_weather_symbol(self._fmi.current.data.symbol.value, self._fmi.hass)
@property
def forecast(self):
"""Return the forecast array."""
if self._fmi is None:
_LOGGER.debug("FMI: Coordinator is not available!")
return None
if self._fmi.forecast is None:
return None
if self._daily_mode:
# Daily mode, aggregate forecast for every day
day = 0
data = []
for forecast in self._fmi.forecast.forecasts:
time = forecast.time.astimezone(tz.tzlocal())
if day != time.day:
day = time.day
data.append({
ATTR_FORECAST_TIME: time,
ATTR_FORECAST_CONDITION: get_weather_symbol(forecast.symbol.value),
ATTR_FORECAST_TEMP: forecast.temperature.value,
ATTR_FORECAST_TEMP_LOW: forecast.temperature.value,
ATTR_FORECAST_PRECIPITATION: forecast.precipitation_amount.value,
ATTR_FORECAST_WIND_SPEED: forecast.wind_speed.value,
ATTR_FORECAST_WIND_BEARING: forecast.wind_direction.value,
ATTR_WEATHER_PRESSURE: forecast.pressure.value,
ATTR_WEATHER_HUMIDITY: forecast.humidity.value
})
else:
if data[-1][ATTR_FORECAST_TEMP] < forecast.temperature.value:
data[-1][ATTR_FORECAST_TEMP] = forecast.temperature.value
if data[-1][ATTR_FORECAST_TEMP_LOW] > forecast.temperature.value:
data[-1][ATTR_FORECAST_TEMP_LOW] = forecast.temperature.value
else:
data = [
{
ATTR_FORECAST_TIME: forecast.time.astimezone(tz.tzlocal()),
ATTR_FORECAST_CONDITION: get_weather_symbol(forecast.symbol.value),
ATTR_FORECAST_TEMP: forecast.temperature.value,
ATTR_FORECAST_PRECIPITATION: forecast.precipitation_amount.value,
ATTR_FORECAST_WIND_SPEED: forecast.wind_speed.value,
ATTR_FORECAST_WIND_BEARING: forecast.wind_direction.value,
ATTR_WEATHER_PRESSURE: forecast.pressure.value,
ATTR_WEATHER_HUMIDITY: forecast.humidity.value,
}
for forecast in self._fmi.forecast.forecasts
]
return data
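# Hedged illustration (not part of the integration): the daily-mode branch in
# forecast() above folds hourly entries into one dict per calendar day, keeping
# the first entry's fields and only widening the temperature range. A minimal
# standalone version of that fold, assuming `hourly` is a list of dicts that
# already carry ATTR_FORECAST_TIME / ATTR_FORECAST_TEMP / ATTR_FORECAST_TEMP_LOW:
def _example_fold_daily(hourly):
    daily = []
    for entry in hourly:
        if not daily or daily[-1][ATTR_FORECAST_TIME].day != entry[ATTR_FORECAST_TIME].day:
            daily.append(dict(entry))  # start a new day from this entry
        else:
            day = daily[-1]
            day[ATTR_FORECAST_TEMP] = max(day[ATTR_FORECAST_TEMP], entry[ATTR_FORECAST_TEMP])
            day[ATTR_FORECAST_TEMP_LOW] = min(day[ATTR_FORECAST_TEMP_LOW], entry[ATTR_FORECAST_TEMP_LOW])
    return daily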
| 31.720183 | 93 | 0.609978 |
7944c9232b2cfc23435c938d0ecff10457f85dc7 | 1,627 | py | Python | Modules/DeepCreamPy/file.py | Gusb3ll/tsukiuncen | 2ae6bcac3cd3c8f224d179299ef46afd9ec87b24 | [
"MIT"
] | 8 | 2022-02-19T16:44:39.000Z | 2022-02-21T08:08:33.000Z | Modules/DeepCreamPy/file.py | Gusb3ll/tsukiuncen | 2ae6bcac3cd3c8f224d179299ef46afd9ec87b24 | [
"MIT"
] | 1 | 2022-03-28T12:51:04.000Z | 2022-03-30T05:31:40.000Z | Modules/DeepCreamPy/file.py | Gusb3ll/Tsuki | 2ae6bcac3cd3c8f224d179299ef46afd9ec87b24 | [
"MIT"
] | null | null | null | import os
def check_file(input_dir, output_dir, Release_version=True):
file_list = []
output_file_list = []
files_removed = []
input_dir = os.listdir(input_dir)
output_dir = os.listdir(output_dir)
for file_in in input_dir:
if not file_in.startswith('.'):
file_list.append(file_in)
if(Release_version is True):
print("\nChecking valid files...")
for file_out in output_dir:
if file_out.lower().endswith('.png'):
output_file_list.append(file_out)
for lhs in file_list:
if not lhs.lower().endswith('.png'):
files_removed.append((lhs, 0))
for rhs in output_file_list:
if(lhs == rhs):
files_removed.append((lhs, 1))
print("\n### These files will not be decensored for following reason ###\n")
error_messages(file_list, files_removed)
input("\nPress anything to continue...")
print("\n###################################\n")
return file_list, files_removed
def error_messages(file_list, files_removed):
if files_removed is None:
return
for remove_this, reason in files_removed:
if(file_list is not None):
file_list.remove(remove_this)
if reason == 0:
print(" REMOVED : (" + str(remove_this) + ") is not PNG file format")
elif reason == 1:
print(" REMOVED : (" + str(remove_this) + ") already exists")
elif reason == 2:
print(" REMOVED : (" + str(remove_this) + ") file unreadable")
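# Hedged usage sketch (not in the original module): check_file() is normally
# pointed at the decensor input/output folders and returns the candidate PNGs
# still to process plus the entries it decided to skip; Release_version=False
# suppresses the interactive confirmation prompt. Folder names below are
# hypothetical.
if __name__ == "__main__":
    remaining, skipped = check_file("decensor_input", "decensor_output", Release_version=False)
    print("to process:", remaining)
    print("skipped:", skipped)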
| 31.288462 | 85 | 0.569146 |
7944c940c20787c2c4877793c8aa925cac761302 | 1,158 | py | Python | src/cms/views/media/media_actions.py | mckinly/cms-django | c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca | [
"Apache-2.0"
] | null | null | null | src/cms/views/media/media_actions.py | mckinly/cms-django | c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca | [
"Apache-2.0"
] | 5 | 2021-02-10T02:41:20.000Z | 2022-03-12T00:56:56.000Z | src/cms/views/media/media_actions.py | mckinly/cms-django | c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca | [
"Apache-2.0"
] | null | null | null | """
This module contains view actions for media related objects.
"""
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.views.decorators.http import require_POST
from ...decorators import region_permission_required
from ...models import Document, Region
@require_POST
@login_required
@region_permission_required
# pylint: disable=unused-argument
def delete_file(request, document_id, region_slug):
"""
This view deletes a file both from the database and the file system.
:param request: The current request
:type request: ~django.http.HttpResponse
:param document_id: The id of the document which is being deleted
:type document_id: int
:param region_slug: The slug of the region to which this document belongs
:type region_slug: str
:return: A redirection to the media library
:rtype: ~django.http.HttpResponseRedirect
"""
region = Region.get_current_region(request)
if request.method == "POST":
document = Document.objects.get(id=document_id)
document.delete()
return redirect("media", **{"region_slug": region.slug})
| 29.692308 | 77 | 0.746978 |
7944cafa3f42181d7981b0ec8239b5e0b19bdbe5 | 1,438 | py | Python | test/plugins/test_range_check.py | eduNEXT/edx-lint | e129a8b5478469f44737cb7ba1afc93b5a994bba | [
"Apache-2.0"
] | 43 | 2015-05-30T21:35:34.000Z | 2021-09-21T07:15:05.000Z | test/plugins/test_range_check.py | eduNEXT/edx-lint | e129a8b5478469f44737cb7ba1afc93b5a994bba | [
"Apache-2.0"
] | 106 | 2015-02-02T17:43:55.000Z | 2021-12-20T03:05:16.000Z | test/plugins/test_range_check.py | eduNEXT/edx-lint | e129a8b5478469f44737cb7ba1afc93b5a994bba | [
"Apache-2.0"
] | 22 | 2015-08-28T16:19:41.000Z | 2021-09-01T10:36:54.000Z | """Test range_check.py"""
import pytest
from .pylint_test import run_pylint
@pytest.mark.parametrize("range_name", ["range", "xrange"])
def test_range(range_name):
source = (
"""\
START, STOP, STEP = 0, 10, 1
# Bad
range(0, 10) #=A
range(0, STOP) #=B
range(0, 10, 1) #=C
range(0, STOP, 1) #=D
range(10, 20, 1) #=E
# Good
range(10)
range(1, 10)
range(0, 10, 2)
range(1, 10, 2)
# no message when variables are involved
range(START, 100)
range(START, STOP)
range(0, 10, STEP)
# if it has four arguments, we don't know what's going on...
range(0, 10, 1, "something")
# trickier cases
range("something", "or other")
[range][0](0, 10)
some_other_function(0, 10)
"""
).replace("range", range_name)
msg_ids = "simplifiable-range"
messages = run_pylint(source, msg_ids)
expected = {
f"A:simplifiable-range:{range_name}() call could be single-argument",
f"B:simplifiable-range:{range_name}() call could be single-argument",
f"C:simplifiable-range:{range_name}() call could be single-argument",
f"D:simplifiable-range:{range_name}() call could be single-argument",
f"E:simplifiable-range:{range_name}() call could be two-argument",
}
assert expected == messages
| 27.653846 | 77 | 0.564673 |
7944cb0140d50899a466521d50acbb389adeba1d | 12,995 | py | Python | components/cuda.py | RobInLabUJI/ROSLab | 3a5047a204989dea108cb163fd1ca7516ec2f5c9 | [
"MIT"
] | 10 | 2019-09-18T18:51:06.000Z | 2022-01-25T21:46:05.000Z | components/cuda.py | RobInLabUJI/ROSLab | 3a5047a204989dea108cb163fd1ca7516ec2f5c9 | [
"MIT"
] | 2 | 2019-09-11T13:02:35.000Z | 2019-10-11T12:44:13.000Z | components/cuda.py | RobInLabUJI/ROSLab | 3a5047a204989dea108cb163fd1ca7516ec2f5c9 | [
"MIT"
] | 2 | 2019-10-31T06:29:05.000Z | 2020-01-08T03:18:53.000Z | import os, sys
versions = ['8.0-runtime', '8.0-devel', '9.0-runtime', '9.0-devel',
'9.2-runtime', '9.2-devel', '10.0-runtime', '10.0-devel']
DOCKER_CUDA_HEADER = """
###################################### CUDA ####################################
"""
DOCKER_RUNTIME_CONTENTS = {}
DOCKER_RUNTIME_CONTENTS['18.04'] = {}
DOCKER_RUNTIME_CONTENTS['16.04'] = {}
DOCKER_RUNTIME_CONTENTS['18.04']['10.0'] = """
RUN apt-get update && apt-get install -y --no-install-recommends gnupg2 curl ca-certificates && \\
curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/7fa2af80.pub | apt-key add - && \\
echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/cuda.list && \\
echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list && \\
apt-get purge --autoremove -y curl && \\
rm -rf /var/lib/apt/lists/*
ENV CUDA_VERSION 10.0.130
ENV CUDA_PKG_VERSION 10-0=$CUDA_VERSION-1
# For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a
RUN apt-get update && apt-get install -y --no-install-recommends \\
cuda-cudart-$CUDA_PKG_VERSION \\
cuda-compat-10-0=410.48-1 && \\
ln -s cuda-10.0 /usr/local/cuda && \\
rm -rf /var/lib/apt/lists/*
ENV PATH /usr/local/cuda/bin:${PATH}
# nvidia-container-runtime
ENV NVIDIA_VISIBLE_DEVICES all
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
ENV NVIDIA_REQUIRE_CUDA "cuda>=10.0 brand=tesla,driver>=384,driver<385"
ENV NCCL_VERSION 2.4.2
RUN apt-get update && apt-get install -y --no-install-recommends \\
cuda-libraries-$CUDA_PKG_VERSION \\
cuda-nvtx-$CUDA_PKG_VERSION \\
libnccl2=$NCCL_VERSION-1+cuda10.0 && \\
apt-mark hold libnccl2 && \\
rm -rf /var/lib/apt/lists/*
"""
DOCKER_RUNTIME_CONTENTS['18.04']['9.2'] = """
# CUDA 9.2 is not officially supported on ubuntu 18.04 yet, we use the ubuntu 17.10 repository for CUDA instead.
RUN apt-get update && apt-get install -y --no-install-recommends gnupg2 curl ca-certificates && \\
curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1710/x86_64/7fa2af80.pub | apt-key add - && \\
echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1710/x86_64 /" > /etc/apt/sources.list.d/cuda.list && \\
echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list && \\
apt-get purge --autoremove -y curl && \\
rm -rf /var/lib/apt/lists/*
ENV CUDA_VERSION 9.2.148
ENV CUDA_PKG_VERSION 9-2=$CUDA_VERSION-1
RUN apt-get update && apt-get install -y --no-install-recommends \\
cuda-cudart-$CUDA_PKG_VERSION && \\
ln -s cuda-9.2 /usr/local/cuda && \\
rm -rf /var/lib/apt/lists/*
# nvidia-docker 1.0
LABEL com.nvidia.volumes.needed="nvidia_driver"
LABEL com.nvidia.cuda.version="${CUDA_VERSION}"
RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \\
echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf
ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH}
ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64
# nvidia-container-runtime
ENV NVIDIA_VISIBLE_DEVICES all
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
ENV NVIDIA_REQUIRE_CUDA "cuda>=9.2"
ENV NCCL_VERSION 2.3.7
RUN apt-get update && apt-get install -y --no-install-recommends \\
cuda-libraries-$CUDA_PKG_VERSION \\
cuda-nvtx-$CUDA_PKG_VERSION \\
libnccl2=$NCCL_VERSION-1+cuda9.2 && \\
apt-mark hold libnccl2 && \\
rm -rf /var/lib/apt/lists/*
"""
DOCKER_RUNTIME_CONTENTS['16.04']['10.0'] = """
RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates apt-transport-https gnupg-curl && \\
rm -rf /var/lib/apt/lists/* && \\
NVIDIA_GPGKEY_SUM=d1be581509378368edeec8c1eb2958702feedf3bc3d17011adbf24efacce4ab5 && \\
NVIDIA_GPGKEY_FPR=ae09fe4bbd223a84b2ccfce3f60f4b3d7fa2af80 && \\
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/7fa2af80.pub && \\
apt-key adv --export --no-emit-version -a $NVIDIA_GPGKEY_FPR | tail -n +5 > cudasign.pub && \\
echo "$NVIDIA_GPGKEY_SUM cudasign.pub" | sha256sum -c --strict - && rm cudasign.pub && \\
echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64 /" > /etc/apt/sources.list.d/cuda.list && \\
echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list
ENV CUDA_VERSION 10.0.130
ENV CUDA_PKG_VERSION 10-0=$CUDA_VERSION-1
# For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a
RUN apt-get update && apt-get install -y --no-install-recommends \\
cuda-cudart-$CUDA_PKG_VERSION \\
cuda-compat-10-0=410.48-1 && \\
ln -s cuda-10.0 /usr/local/cuda && \\
rm -rf /var/lib/apt/lists/*
ENV PATH /usr/local/cuda/bin:${PATH}
# nvidia-container-runtime
ENV NVIDIA_VISIBLE_DEVICES all
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
ENV NVIDIA_REQUIRE_CUDA "cuda>=10.0 brand=tesla,driver>=384,driver<385"
ENV NCCL_VERSION 2.4.2
RUN apt-get update && apt-get install -y --no-install-recommends \\
cuda-libraries-$CUDA_PKG_VERSION \\
cuda-nvtx-$CUDA_PKG_VERSION \\
libnccl2=$NCCL_VERSION-1+cuda10.0 && \\
apt-mark hold libnccl2 && \\
rm -rf /var/lib/apt/lists/*
"""
DOCKER_RUNTIME_CONTENTS['16.04']['9.0'] = """
RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates apt-transport-https gnupg-curl && \\
rm -rf /var/lib/apt/lists/* && \\
NVIDIA_GPGKEY_SUM=d1be581509378368edeec8c1eb2958702feedf3bc3d17011adbf24efacce4ab5 && \\
NVIDIA_GPGKEY_FPR=ae09fe4bbd223a84b2ccfce3f60f4b3d7fa2af80 && \\
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/7fa2af80.pub && \\
apt-key adv --export --no-emit-version -a $NVIDIA_GPGKEY_FPR | tail -n +5 > cudasign.pub && \\
echo "$NVIDIA_GPGKEY_SUM cudasign.pub" | sha256sum -c --strict - && rm cudasign.pub && \\
echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64 /" > /etc/apt/sources.list.d/cuda.list && \\
echo "deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list
ENV CUDA_VERSION 9.0.176
ENV CUDA_PKG_VERSION 9-0=$CUDA_VERSION-1
RUN apt-get update && apt-get install -y --no-install-recommends \\
cuda-cudart-$CUDA_PKG_VERSION && \\
ln -s cuda-9.0 /usr/local/cuda && \\
rm -rf /var/lib/apt/lists/*
# nvidia-docker 1.0
LABEL com.nvidia.volumes.needed="nvidia_driver"
LABEL com.nvidia.cuda.version="${CUDA_VERSION}"
RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \\
echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf
ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH}
ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64
# nvidia-container-runtime
ENV NVIDIA_VISIBLE_DEVICES all
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
ENV NVIDIA_REQUIRE_CUDA "cuda>=9.0"
ENV NCCL_VERSION 2.4.2
RUN apt-get update && apt-get install -y --no-install-recommends \\
cuda-libraries-$CUDA_PKG_VERSION \\
cuda-cublas-9-0=9.0.176.4-1 \\
libnccl2=$NCCL_VERSION-1+cuda9.0 && \\
apt-mark hold libnccl2 && \\
rm -rf /var/lib/apt/lists/*
"""
DOCKER_RUNTIME_CONTENTS['16.04']['8.0'] = """
RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates apt-transport-https gnupg-curl && \\
rm -rf /var/lib/apt/lists/* && \\
NVIDIA_GPGKEY_SUM=d1be581509378368edeec8c1eb2958702feedf3bc3d17011adbf24efacce4ab5 && \\
NVIDIA_GPGKEY_FPR=ae09fe4bbd223a84b2ccfce3f60f4b3d7fa2af80 && \\
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/7fa2af80.pub && \\
apt-key adv --export --no-emit-version -a $NVIDIA_GPGKEY_FPR | tail -n +5 > cudasign.pub && \\
echo "$NVIDIA_GPGKEY_SUM cudasign.pub" | sha256sum -c --strict - && rm cudasign.pub && \\
echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64 /" > /etc/apt/sources.list.d/cuda.list
ENV CUDA_VERSION 8.0.61
ENV CUDA_PKG_VERSION 8-0=$CUDA_VERSION-1
RUN apt-get update && apt-get install -y --no-install-recommends \\
cuda-nvrtc-$CUDA_PKG_VERSION \\
cuda-nvgraph-$CUDA_PKG_VERSION \\
cuda-cusolver-$CUDA_PKG_VERSION \\
cuda-cublas-8-0=8.0.61.2-1 \\
cuda-cufft-$CUDA_PKG_VERSION \\
cuda-curand-$CUDA_PKG_VERSION \\
cuda-cusparse-$CUDA_PKG_VERSION \\
cuda-npp-$CUDA_PKG_VERSION \\
cuda-cudart-$CUDA_PKG_VERSION && \\
ln -s cuda-8.0 /usr/local/cuda && \\
rm -rf /var/lib/apt/lists/*
# nvidia-docker 1.0
LABEL com.nvidia.volumes.needed="nvidia_driver"
LABEL com.nvidia.cuda.version="${CUDA_VERSION}"
RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \\
echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf
ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH}
ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64
# nvidia-container-runtime
ENV NVIDIA_VISIBLE_DEVICES all
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
ENV NVIDIA_REQUIRE_CUDA "cuda>=8.0"
"""
DOCKER_DEVEL_CONTENTS = {}
DOCKER_DEVEL_CONTENTS['18.04'] = {}
DOCKER_DEVEL_CONTENTS['16.04'] = {}
DOCKER_DEVEL_CONTENTS['18.04']['10.0'] = """
RUN apt-get update && apt-get install -y --no-install-recommends \\
cuda-libraries-dev-$CUDA_PKG_VERSION \\
cuda-nvml-dev-$CUDA_PKG_VERSION \\
cuda-minimal-build-$CUDA_PKG_VERSION \\
cuda-command-line-tools-$CUDA_PKG_VERSION \\
libnccl-dev=$NCCL_VERSION-1+cuda10.0 && \\
rm -rf /var/lib/apt/lists/*
ENV LIBRARY_PATH /usr/local/cuda/lib64/stubs
"""
DOCKER_DEVEL_CONTENTS['18.04']['9.2'] = """
RUN apt-get update && apt-get install -y --no-install-recommends \\
cuda-libraries-dev-$CUDA_PKG_VERSION \\
cuda-nvml-dev-$CUDA_PKG_VERSION \\
cuda-minimal-build-$CUDA_PKG_VERSION \\
cuda-command-line-tools-$CUDA_PKG_VERSION \\
libnccl-dev=$NCCL_VERSION-1+cuda9.2 && \\
rm -rf /var/lib/apt/lists/*
ENV LIBRARY_PATH /usr/local/cuda/lib64/stubs
"""
DOCKER_DEVEL_CONTENTS['16.04']['10.0'] = """
RUN apt-get update && apt-get install -y --no-install-recommends \\
cuda-libraries-dev-$CUDA_PKG_VERSION \\
cuda-nvml-dev-$CUDA_PKG_VERSION \\
cuda-minimal-build-$CUDA_PKG_VERSION \\
cuda-command-line-tools-$CUDA_PKG_VERSION \\
libnccl-dev=$NCCL_VERSION-1+cuda10.0 && \\
rm -rf /var/lib/apt/lists/*
ENV LIBRARY_PATH /usr/local/cuda/lib64/stubs
"""
DOCKER_DEVEL_CONTENTS['16.04']['9.0'] = """
RUN apt-get update && apt-get install -y --no-install-recommends \\
cuda-libraries-dev-$CUDA_PKG_VERSION \\
cuda-nvml-dev-$CUDA_PKG_VERSION \\
cuda-minimal-build-$CUDA_PKG_VERSION \\
cuda-command-line-tools-$CUDA_PKG_VERSION \\
cuda-core-9-0=9.0.176.3-1 \\
cuda-cublas-dev-9-0=9.0.176.4-1 \\
libnccl-dev=$NCCL_VERSION-1+cuda9.0 && \\
rm -rf /var/lib/apt/lists/*
ENV LIBRARY_PATH /usr/local/cuda/lib64/stubs
"""
DOCKER_DEVEL_CONTENTS['16.04']['8.0'] = """
RUN apt-get update && apt-get install -y --no-install-recommends \\
cuda-core-$CUDA_PKG_VERSION \\
cuda-misc-headers-$CUDA_PKG_VERSION \\
cuda-command-line-tools-$CUDA_PKG_VERSION \\
cuda-nvrtc-dev-$CUDA_PKG_VERSION \\
cuda-nvml-dev-$CUDA_PKG_VERSION \\
cuda-nvgraph-dev-$CUDA_PKG_VERSION \\
cuda-cusolver-dev-$CUDA_PKG_VERSION \\
cuda-cublas-dev-8-0=8.0.61.2-1 \\
cuda-cufft-dev-$CUDA_PKG_VERSION \\
cuda-curand-dev-$CUDA_PKG_VERSION \\
cuda-cusparse-dev-$CUDA_PKG_VERSION \\
cuda-npp-dev-$CUDA_PKG_VERSION \\
cuda-cudart-dev-$CUDA_PKG_VERSION \\
cuda-driver-dev-$CUDA_PKG_VERSION && \\
rm -rf /var/lib/apt/lists/*
ENV LIBRARY_PATH /usr/local/cuda/lib64/stubs
"""
def write(DOCKER_FILE, version, ubuntu):
if version in versions:
with open(DOCKER_FILE, "a") as dockerfile:
dockerfile.write(DOCKER_CUDA_HEADER)
cuda = version.split('-')[0]
try:
dockerfile.write(DOCKER_RUNTIME_CONTENTS[ubuntu][cuda])
if 'devel' in version:
dockerfile.write(DOCKER_DEVEL_CONTENTS[ubuntu][cuda])
except KeyError as e:
print("CUDA version %s not supported in Ubuntu %s" % (cuda, ubuntu) )
sys.exit(1)
return
else:
print("cuda: version %s not supported. Options: %s" % (version, versions))
sys.exit(1)
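# Hedged usage sketch (not part of the original module): write() appends the
# CUDA runtime layer -- plus the extra devel layer for "-devel" tags -- for the
# requested CUDA/Ubuntu pair to an existing Dockerfile. The file name below is
# hypothetical.
if __name__ == "__main__":
    write("Dockerfile.cuda", "10.0-devel", "18.04")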
| 42.32899 | 150 | 0.679646 |
7944cb4d1cce2d488f0154b32c5872528fc9abba | 2,051 | py | Python | drupal_dockerizer/appconfig.py | jet-dev-team/drupal-dockerizer-cli | 5fd5533459e49a3c580f6115f2d7fd1074fa29aa | [
"MIT"
] | null | null | null | drupal_dockerizer/appconfig.py | jet-dev-team/drupal-dockerizer-cli | 5fd5533459e49a3c580f6115f2d7fd1074fa29aa | [
"MIT"
] | null | null | null | drupal_dockerizer/appconfig.py | jet-dev-team/drupal-dockerizer-cli | 5fd5533459e49a3c580f6115f2d7fd1074fa29aa | [
"MIT"
] | null | null | null | import os
import appdirs
from pathlib import Path
import yaml
config_dir_name = "drupal_dockerizer"
config_file_name = "config.yml"
user_config_dir = appdirs.user_config_dir()
class AppConfig:
data = {}
def __init__(self) -> None:
user_config_path = Path(user_config_dir)
config_dir_path = user_config_path.joinpath(config_dir_name)
config_file_path = config_dir_path.joinpath(config_file_name)
self.config_file_path = str(config_file_path)
if not os.path.exists(str(config_dir_path)):
os.mkdir(str(config_dir_path))
if not os.path.exists(self.config_file_path):
self.data = {
"is_check_requirements_tools": False,
"version": "0.0.5",
"instances": {},
}
self.save()
self.load()
def save(self):
file_config = open(self.config_file_path, "w")
yaml.safe_dump(self.data, file_config, sort_keys=True)
file_config.close()
def load(self):
file_config = open(self.config_file_path, "r")
self.data = dict(yaml.full_load(file_config))
file_config.close()
def addInstance(self, instance_conf):
self.data["instances"][instance_conf.data["compose_project_name"]] = {
"instance": instance_conf.data["compose_project_name"],
"root_dir": instance_conf.data["drupal_root_dir"],
"domain": instance_conf.data["domain_name"]
if instance_conf.data["advanced_networking"]
else "http://localhost",
"status": "up",
}
def stopInstance(self, instance_conf):
self.data["instances"][instance_conf.data["compose_project_name"]][
"status"
] = "stop"
def upInstance(self, instance_conf):
self.data["instances"][instance_conf.data["compose_project_name"]][
"status"
] = "up"
def removeInstance(self, instance_conf):
del self.data["instances"][instance_conf.data["compose_project_name"]]
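# Hedged usage sketch (not part of the original module): AppConfig keeps a
# per-user YAML registry of dockerized Drupal instances; reading it back is
# just instantiation plus a dict lookup. "my_project" below is a hypothetical
# compose project name.
if __name__ == "__main__":
    conf = AppConfig()
    print(conf.data.get("instances", {}).get("my_project", "not registered"))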
| 32.046875 | 78 | 0.629449 |
7944cb4e2b632332a44d51283ae21043ba28c142 | 18,349 | py | Python | tests/test_scheduledtask.py | leonidumanskiy/scheduledtask | c98c403dad95146ea6aac8fee71a7659b3c7209c | [
"MIT"
] | 4 | 2017-02-04T17:35:13.000Z | 2020-07-08T13:32:13.000Z | tests/test_scheduledtask.py | leonidumanskiy/scheduledtask | c98c403dad95146ea6aac8fee71a7659b3c7209c | [
"MIT"
] | 3 | 2019-04-24T07:54:42.000Z | 2019-05-22T10:01:03.000Z | tests/test_scheduledtask.py | leonidumanskiy/scheduledtask | c98c403dad95146ea6aac8fee71a7659b3c7209c | [
"MIT"
] | 1 | 2019-04-24T09:27:07.000Z | 2019-04-24T09:27:07.000Z | import unittest
from scheduledtask import ScheduledTask
from datetime import datetime
class TestScheduledTask(unittest.TestCase):
def _test_previous(self, minutes=None, hours=None, days=None, days_of_week=None, days_of_week_num=None, weeks=None,
months=None, years=None, current_time=None, expected_result=None):
"""Common wrapper for get_previous_time test
"""
task = ScheduledTask(minutes, hours, days, days_of_week, days_of_week_num, weeks, months, years)
self.assertEqual(task.get_previous_time(current_time), expected_result)
def _test_next(self, minutes=None, hours=None, days=None, days_of_week=None, days_of_week_num=None, weeks=None,
months=None, years=None, current_time=None, expected_result=None):
"""Common wrapper for get_next_time test
"""
task = ScheduledTask(minutes, hours, days, days_of_week, days_of_week_num, weeks, months, years)
self.assertEqual(task.get_next_time(current_time), expected_result)
def test_previous_same_day(self):
"""0:45 same day
"""
self._test_previous(minutes=[15, 45], hours=[0], days=None, months=None, years=None,
current_time=datetime(2016, 11, 12, 23, 0),
expected_result=datetime(2016, 11, 12, 0, 45))
def test_previous_every_nth_day_of_month(self):
"""23:00 10th of November
"""
self._test_previous(minutes=[0], hours=[23], days=[10], months=None, years=None,
# at 23:00 every 10th day of month
current_time=datetime(2016, 11, 12, 23, 0), # 23:00 12/11/2016
expected_result=datetime(2016, 11, 10, 23, 0)) # 23:00 10/11/2016
def test_previous_every_nth_day_of_month2(self):
"""15th last month
"""
self._test_previous(minutes=[0], hours=[23], days=[15], months=None, years=None,
# at 23:00 every 15th day of month
current_time=datetime(2016, 11, 12, 23, 0), # 23:00 12/11/2016
expected_result=datetime(2016, 10, 15, 23, 0)) # 23:00 15/10/2016
def test_previous_31st_day_of_month(self):
"""31st of October, 2016
"""
self._test_previous(minutes=[0], hours=[0], days=[31], months=None, years=None,
# at 00:00 31st day of month
current_time=datetime(2016, 12, 15, 0, 0), # 00:00 15/12/2016
expected_result=datetime(2016, 10, 31, 0, 0)) # 00:00 31/10/2016
def test_previous_every_nth_day_of_month_correct_minute(self):
"""15th last month, check for correct minute (30)
"""
self._test_previous(minutes=[0, 30], hours=[0], days=[15], months=None, years=None,
# at 00:00 or 00:30 every 15th day of month
current_time=datetime(2016, 11, 12, 23, 0), # 23:00 12/11/2016
                            expected_result=datetime(2016, 10, 15, 0, 30)) # 00:30 15/10/2016
def test_previous_independence_day_this_year(self):
"""This year independence day, 4th of July
"""
self._test_previous(minutes=[0], hours=[0], days=[4], months=[7], years=None, # at 00:00 every 4th of July
current_time=datetime(2016, 11, 12, 0, 0), # 00:00 12/11/2016
expected_result=datetime(2016, 7, 4, 0, 0)) # 00:00 04/07/2015
def test_previous_independence_day_last_year(self):
"""Last year independence day, 4th of July
"""
self._test_previous(minutes=[0], hours=[0], days=[4], months=[7], years=None, # at 00:00 every 4th of July
current_time=datetime(2016, 3, 12, 0, 0), # 00:00 12/11/2016
expected_result=datetime(2015, 7, 4, 0, 0)) # 00:00 04/07/2015
def test_previous_every_30_minutes(self):
"""Last hour 00/30 minutes
"""
self._test_previous(minutes=[0, 30], hours=None, days=None, months=None, years=None,
# every 30 mins, at 00 and 30th minute
current_time=datetime(2016, 11, 15, 19, 55), # 19:55 15/11/2016
expected_result=datetime(2016, 11, 15, 19, 30)) # 19:30 15/11/2016
def test_previous_31st_january(self):
"""January, 31st, when current month is March, 15th
"""
self._test_previous(minutes=[0], hours=[0], days=[31], months=[1], years=None, # January, 31st, at 00:00
current_time=datetime(2016, 3, 15, 0, 0), # 00:00 15/3/2016
expected_result=datetime(2016, 1, 31, 0, 0)) # 00:00 31/1/2016
def test_previous_31st_day_of_month_skip_feb(self):
"""31st day of month, when current month is March, 15th (should skip February since it doesn't have 31 days)
"""
self._test_previous(minutes=[0], hours=[0], days=[31], months=None, years=None,
# Every 31st day of month, at 00:00
current_time=datetime(2016, 3, 15, 0, 0), # 00:00 15/3/2016
expected_result=datetime(2016, 1, 31, 0, 0)) # 00:00 31/1/2016
def test_previous_every_monday(self):
"""Every monday at 00:00, check this week
"""
self._test_previous(minutes=[0], hours=[0], days_of_week=[0], months=None, years=None, # Every Monday at 00:00
current_time=datetime(2016, 11, 16, 15, 30), # 15:30 16/11/2016 Wednesday
expected_result=datetime(2016, 11, 14, 0, 0)) # 00:00 14/11/2016 Monday
def test_previous_every_friday(self):
"""Every friday at 00:00, check last week
"""
self._test_previous(minutes=[0], hours=[0], days_of_week=[4], months=None, years=None, # Every Friday at 00:00
current_time=datetime(2016, 11, 16, 15, 30), # 15:30 16/11/2016 Wednesday
expected_result=datetime(2016, 11, 11, 0, 0)) # 00:00 11/11/2016 Friday
def test_previous_first_monday_of_november(self): # Every first Monday of November
"""Every first Monday of November
"""
self._test_previous(minutes=[0], hours=[0], days_of_week=[0], days_of_week_num=[0], months=[11], years=None,
# Every first Monday of November
                            current_time=datetime(2016, 11, 17, 15, 30), # 15:30 17/11/2016 Thursday
expected_result=datetime(2016, 11, 7, 0, 0)) # 00:00 7/11/2016 Monday
def test_previous_first_tuesday_of_november(self): # Every first Tuesday of November
"""Every first Tuesday of November
"""
self._test_previous(minutes=[0], hours=[0], days_of_week=[1], days_of_week_num=[0], months=[11], years=None,
# Every first Tuesday of November
current_time=datetime(2016, 11, 16, 15, 30), # 15:30 16/11/2016 Wednesday
expected_result=datetime(2016, 11, 1, 0, 0)) # 00:00 1/11/2016 Tuesday
    def test_previous_5th_saturday_of_december(self): # Every last Saturday of December
        """Every 5th (n=4) Saturday of December
"""
self._test_previous(minutes=[0], hours=[0], days_of_week=[5], days_of_week_num=[4], months=[12], years=None,
# Every 5th Saturday of December
current_time=datetime(2017, 1, 1, 00, 00), # 00:00 01/01/2017 Sunday
expected_result=datetime(2016, 12, 31, 0, 0)) # 00:00 31/12/2016 Saturday
def test_previous_5th_wednesday(self): # Every 5th wednesday
"""Every 5th Wednesday
"""
self._test_previous(minutes=[0], hours=[0], days_of_week=[2], days_of_week_num=[4], months=None, years=None,
# Every 5th Wednesday
current_time=datetime(2017, 1, 31, 00, 00), # 00:00 31/01/2017 Tuesday
expected_result=datetime(2016, 11, 30, 0, 0)) # 00:00 30/11/2016 Wednesday
def test_previous_every_even_day(self): # Every even day at 00:00
"""Every even day at 00:00
"""
self._test_previous(minutes=[0], hours=[0], days=range(0, 31, 2), months=None, years=None,
# Every even day
                            current_time=datetime(2016, 11, 17, 15, 00), # 15:00 17/11/2016 Thursday
                            expected_result=datetime(2016, 11, 16, 0, 0)) # 00:00 16/11/2016 Wednesday
def test_previous_every_third_hour(self): # Every third hour
"""Every third hour
"""
self._test_previous(minutes=[0], hours=range(0, 24, 3), days=None, months=None, years=None,
# Every third hour
                            current_time=datetime(2016, 11, 17, 10, 00), # 10:00 17/11/2016 Thursday
expected_result=datetime(2016, 11, 17, 9, 0)) # 9:00 17/11/2016 Thursday
def test_previous_monday_before_presidential_election_day(self): # Every first Monday of November every 4rth year
"""Every first Monday of November every 4rth year, starting from 1848
"""
self._test_previous(minutes=[0], hours=[0], days_of_week=[0], days_of_week_num=[0], months=[11], years=range(1848, 9999, 4),
# Every first Monday of November, every 4rth year starting with 1848
                            current_time=datetime(2018, 11, 17, 15, 30), # 15:30 17/11/2018 Saturday
expected_result=datetime(2016, 11, 7, 0, 0)) # 00:00 7/11/2016 Monday
def test_next_same_day(self):
"""0:45 same day
"""
self._test_next(minutes=[15, 45], hours=[0], days=None, months=None, years=None,
current_time=datetime(2016, 11, 12, 0, 30),
expected_result=datetime(2016, 11, 12, 0, 45))
def test_next_every_nth_day_of_month(self):
"""23:00 10th of November
"""
self._test_next(minutes=[0], hours=[23], days=[10], months=None, years=None,
# at 23:00 every 10th day of month
current_time=datetime(2016, 11, 8, 23, 0), # 23:00 8/11/2016
expected_result=datetime(2016, 11, 10, 23, 0)) # 23:00 10/11/2016
def test_next_every_nth_day_of_month2(self):
"""15th next month
"""
self._test_next(minutes=[0], hours=[23], days=[15], months=None, years=None,
# at 23:00 every 15th day of month
current_time=datetime(2016, 9, 20, 23, 0), # 23:00 20/9/2016
expected_result=datetime(2016, 10, 15, 23, 0)) # 23:00 15/10/2016
def test_next_31st_day_of_month(self):
"""31st of October, 2016
"""
self._test_next(minutes=[0], hours=[0], days=[31], months=None, years=None,
# at 00:00 31st day of month
current_time=datetime(2016, 9, 15, 0, 0), # 00:00 15/9/2016
expected_result=datetime(2016, 10, 31, 0, 0)) # 00:00 31/10/2016
def test_next_every_nth_day_of_month_correct_minute(self):
"""15th next month, check for correct minute (30)
"""
self._test_next(minutes=[0, 30], hours=[0], days=[15], months=None, years=None,
# at 00:00 or 00:30 every 15th day of month
current_time=datetime(2016, 9, 16, 23, 0), # 23:00 16/9/2016
                        expected_result=datetime(2016, 10, 15, 0, 0)) # 00:00 15/10/2016
def test_next_independence_day_this_year(self):
"""This year independence day, 4th of July
"""
self._test_next(minutes=[0], hours=[0], days=[4], months=[7], years=None, # at 00:00 every 4th of July
current_time=datetime(2016, 3, 12, 0, 0), # 00:00 12/03/2016
expected_result=datetime(2016, 7, 4, 0, 0)) # 00:00 04/07/2016
def test_next_independence_day_next_year(self):
"""Next year independence day, 4th of July
"""
self._test_next(minutes=[0], hours=[0], days=[4], months=[7], years=None, # at 00:00 every 4th of July
current_time=datetime(2014, 8, 15, 0, 0), # 00:00 08/15/2014
expected_result=datetime(2015, 7, 4, 0, 0)) # 00:00 04/07/2015
def test_next_every_30_minutes(self):
"""Next hour 00/30 minutes
"""
self._test_next(minutes=[0, 30], hours=None, days=None, months=None, years=None,
# every 30 mins, at 00 and 30th minute
                        current_time=datetime(2016, 11, 15, 19, 5), # 19:05 15/11/2016
expected_result=datetime(2016, 11, 15, 19, 30)) # 19:30 15/11/2016
def test_next_1st_january(self):
"""January, 1st, when current month is March, 15th
"""
self._test_next(minutes=[0], hours=[0], days=[1], months=[1], years=None, # January, 1st, at 00:00
current_time=datetime(2016, 3, 15, 0, 0), # 00:00 15/3/2016
                        expected_result=datetime(2017, 1, 1, 0, 0)) # 00:00 1/1/2017
def test_next_31st_day_of_month_skip_feb(self):
"""31st day of month, when current month is January, 31st (should skip February since it doesn't have 31 days)
"""
self._test_next(minutes=[0], hours=[0], days=[31], months=None, years=None,
# Every 31st day of month, at 00:00
current_time=datetime(2016, 1, 31, 15, 0), # 15:00 31/1/2016
expected_result=datetime(2016, 3, 31, 0, 0)) # 00:00 31/3/2016
def test_next_every_wednesday(self):
"""Every Wednesday at 00:00, check this week
"""
self._test_next(minutes=[0], hours=[0], days_of_week=[2], months=None, years=None, # Every Wednesday at 00:00
current_time=datetime(2016, 11, 15, 15, 30), # 15:30 15/11/2016 Tuesday
expected_result=datetime(2016, 11, 16, 0, 0)) # 00:00 16/11/2016 Wednesday
def test_next_every_monday(self):
"""Every monday at 00:00, check next week
"""
self._test_next(minutes=[0], hours=[0], days_of_week=[0], months=None, years=None, # Every Monday at 00:00
current_time=datetime(2016, 11, 16, 15, 30), # 15:30 16/11/2016 Wednesday
expected_result=datetime(2016, 11, 21, 0, 0)) # 00:00 21/11/2016 Monday
def test_next_first_monday_of_november(self): # Every first Monday of November
"""Every first Monday of November
"""
self._test_next(minutes=[0], hours=[0], days_of_week=[0], days_of_week_num=[0], months=[11], years=None,
# Every first Monday of November
current_time=datetime(2016, 10, 18, 15, 30), # 15:30 18/10/2016 Tuesday
expected_result=datetime(2016, 11, 7, 0, 0)) # 00:00 7/11/2016 Monday
def test_next_first_tuesday_of_november(self): # Every first Tuesday of November
"""Every first Tuesday of November
"""
self._test_next(minutes=[0], hours=[0], days_of_week=[1], days_of_week_num=[0], months=[11], years=None,
# Every first Tuesday of November
current_time=datetime(2016, 10, 16, 15, 30), # 15:30 16/10/2016 Monday
expected_result=datetime(2016, 11, 1, 0, 0)) # 00:00 1/11/2016 Tuesday
def test_next_5th_saturday_of_december(self): # Every last Saturday of December
"""Every 5th (n=4) Saturday of December
"""
self._test_next(minutes=[0], hours=[0], days_of_week=[5], days_of_week_num=[4], months=[12], years=None,
# Every 5th Saturday of December
current_time=datetime(2015, 10, 15, 00, 00), # 00:00 15/10/2015 Thursday
expected_result=datetime(2016, 12, 31, 0, 0)) # 00:00 31/12/2016 Saturday
def test_next_5th_wednesday(self): # Every 5th wednesday
"""Every 5th Wednesday
"""
self._test_next(minutes=[0], hours=[0], days_of_week=[2], days_of_week_num=[4], months=None, years=None,
# Every 5th Wednesday
                        current_time=datetime(2016, 10, 1, 00, 00), # 00:00 1/10/2016 Saturday
expected_result=datetime(2016, 11, 30, 0, 0)) # 00:00 30/11/2016 Wednesday
def test_next_every_even_day(self): # Every even day at 00:00
"""Every even day at 00:00
"""
self._test_next(minutes=[0], hours=[0], days=range(0, 31, 2), months=None, years=None,
# Every even day
current_time=datetime(2016, 11, 17, 15, 00), # 15:00 17/11/2016 Thursday
expected_result=datetime(2016, 11, 18, 0, 0)) # 00:00 18/11/2016 Friday
def test_next_every_third_hour(self): # Every third hour
"""Every third hour
"""
self._test_next(minutes=[0], hours=range(0, 24, 3), days=None, months=None, years=None,
# Every third hour
                        current_time=datetime(2016, 11, 17, 10, 00), # 10:00 17/11/2016 Thursday
expected_result=datetime(2016, 11, 17, 12, 0)) # 12:00 17/11/2016 Thursday
def test_next_monday_before_presidential_election_day(self): # Every first Monday of November every 4rth year
"""Every first Monday of November every 4rth year, starting from 1848
"""
self._test_next(minutes=[0], hours=[0], days_of_week=[0], days_of_week_num=[0], months=[11],
years=range(1848, 9999, 4),
# Every first Monday of November, every 4rth year starting with 1848
                        current_time=datetime(2016, 11, 17, 15, 30), # 15:30 17/11/2016 Thursday
expected_result=datetime(2020, 11, 2, 0, 0)) # 00:00 2/11/2020 Monday
if __name__ == '__main__':
unittest.main()
| 57.883281 | 132 | 0.572293 |
7944cb76133aef1d7735dc14084cc20bf4764f30 | 7,326 | py | Python | openff/interchange/tests/utils.py | daico007/openff-interchange | f6586908d39d75abab2b731ec4f0c2891e050cb3 | [
"MIT"
] | null | null | null | openff/interchange/tests/utils.py | daico007/openff-interchange | f6586908d39d75abab2b731ec4f0c2891e050cb3 | [
"MIT"
] | 8 | 2021-06-23T19:24:19.000Z | 2022-03-01T05:04:46.000Z | openff/interchange/tests/utils.py | justinGilmer/openff-system | d5a3da9701c66eddf49cacd6038342f413c04786 | [
"MIT"
] | null | null | null | from collections import defaultdict
from typing import Dict, List, Tuple
import mdtraj as md
import numpy as np
import pytest
from openff.toolkit.topology import Molecule
from openff.utilities.utilities import has_executable
from simtk import openmm
from simtk import unit as simtk_unit
from openff.interchange.components.interchange import Interchange
from openff.interchange.components.mdtraj import OFFBioTop
HAS_GROMACS = any(has_executable(e) for e in ["gmx", "gmx_d"])
HAS_LAMMPS = any(has_executable(e) for e in ["lammps", "lmp_mpi", "lmp_serial"])
needs_gmx = pytest.mark.skipif(not HAS_GROMACS, reason="Needs GROMACS")
needs_lmp = pytest.mark.skipif(not HAS_LAMMPS, reason="Needs LAMMPS")
kj_nm2_mol = simtk_unit.kilojoule_per_mole / simtk_unit.nanometer ** 2
kj_rad2_mol = simtk_unit.kilojoule_per_mole / simtk_unit.radian ** 2
def top_from_smiles(
smiles: str,
n_molecules: int = 1,
) -> OFFBioTop:
"""Create a gas phase OpenFF Topology from a single-molecule SMILES
Parameters
----------
smiles : str
The SMILES of the input molecule
n_molecules : int, optional, default = 1
The number of copies of the SMILES molecule from which to
compose a topology
Returns
-------
    top : openff.interchange.components.mdtraj.OFFBioTop
A single-molecule, gas phase-like topology
"""
mol = Molecule.from_smiles(smiles)
mol.generate_conformers(n_conformers=1)
top = OFFBioTop.from_molecules(n_molecules * [mol])
top.mdtop = md.Topology.from_openmm(top.to_openmm()) # type: ignore[attr-defined]
# Add dummy box vectors
# TODO: Revisit if/after Topology.is_periodic
top.box_vectors = np.eye(3) * 10 * simtk_unit.nanometer
return top
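# Hedged example (not part of the test utilities' API): a typical call builds a
# small gas-phase box of identical molecules; the SMILES string below is an
# arbitrary illustration.
def _example_ethanol_topology():
    return top_from_smiles("CCO", n_molecules=10)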
def _get_charges_from_openmm_system(omm_sys: openmm.System):
for force in omm_sys.getForces():
if type(force) == openmm.NonbondedForce:
break
for idx in range(omm_sys.getNumParticles()):
param = force.getParticleParameters(idx)
yield param[0].value_in_unit(simtk_unit.elementary_charge)
def _get_sigma_from_nonbonded_force(
n_particles: int, nonbond_force: openmm.NonbondedForce
):
for idx in range(n_particles):
param = nonbond_force.getParticleParameters(idx)
yield param[1].value_in_unit(simtk_unit.nanometer)
def _get_epsilon_from_nonbonded_force(
n_particles: int, nonbond_force: openmm.NonbondedForce
):
for idx in range(n_particles):
param = nonbond_force.getParticleParameters(idx)
yield param[2].value_in_unit(simtk_unit.kilojoule_per_mole)
def _get_lj_params_from_openmm_system(omm_sys: openmm.System):
for force in omm_sys.getForces():
if type(force) == openmm.NonbondedForce:
break
n_particles = omm_sys.getNumParticles()
sigmas = np.asarray([*_get_sigma_from_nonbonded_force(n_particles, force)])
epsilons = np.asarray([*_get_epsilon_from_nonbonded_force(n_particles, force)])
return sigmas, epsilons
def _get_charges_from_openff_interchange(off_sys: Interchange):
charges_ = [*off_sys.handlers["Electrostatics"].charges.values()]
charges = np.asarray([charge.magnitude for charge in charges_])
return charges
def _create_torsion_dict(torsion_force) -> Dict[Tuple[int], List[Tuple]]:
torsions = defaultdict(list)
for i in range(torsion_force.getNumTorsions()):
p1, p2, p3, p4, periodicity, phase, k = torsion_force.getTorsionParameters(i)
key = (p1, p2, p3, p4)
torsions[key]
torsions[key].append((periodicity, phase, k))
return torsions
def _create_bond_dict(bond_force):
bonds = dict()
for i in range(bond_force.getNumBonds()):
p1, p2, length, k = bond_force.getBondParameters(i)
key = (p1, p2)
bonds[key] = (length, k)
return bonds
def _create_angle_dict(angle_force):
angles = dict()
for i in range(angle_force.getNumAngles()):
p1, p2, p3, theta, k = angle_force.getAngleParameters(i)
key = (p1, p2, p3)
angles[key] = (theta, k)
return angles
def _compare_individual_torsions(x, y):
assert x[0] == y[0]
assert x[1] == y[1]
assert (x[2] - y[2]) < 1e-15 * simtk_unit.kilojoule_per_mole
def _compare_torsion_forces(force1, force2):
sorted1 = _create_torsion_dict(torsion_force=force1)
sorted2 = _create_torsion_dict(torsion_force=force2)
assert sum(len(v) for v in sorted1.values()) == force1.getNumTorsions()
assert sum(len(v) for v in sorted2.values()) == force2.getNumTorsions()
assert len(sorted1) == len(sorted2)
for key in sorted1:
for i in range(len(sorted1[key])):
_compare_individual_torsions(sorted1[key][i], sorted2[key][i])
def _compare_bond_forces(force1, force2):
assert force1.getNumBonds() == force2.getNumBonds()
bonds1 = _create_bond_dict(force1)
bonds2 = _create_bond_dict(force2)
for key in bonds1:
assert abs(bonds2[key][0] - bonds1[key][0]) < 1e-15 * simtk_unit.nanometer
assert abs(bonds2[key][1] - bonds1[key][1]) < 1e-9 * kj_nm2_mol, abs(
bonds2[key][1] - bonds1[key][1]
)
def _compare_angle_forces(force1, force2):
assert force1.getNumAngles() == force2.getNumAngles()
angles1 = _create_angle_dict(force1)
angles2 = _create_angle_dict(force2)
for key in angles1:
assert abs(angles2[key][0] - angles1[key][0]) < 1e-15 * simtk_unit.radian
assert abs(angles2[key][1] - angles1[key][1]) < 1e-10 * kj_rad2_mol
def _compare_nonbonded_settings(force1, force2):
for attr in dir(force1):
if not attr.startswith("get") or attr in [
"getExceptionParameterOffset",
"getExceptionParameters",
"getGlobalParameterDefaultValue",
"getGlobalParameterName",
"getLJPMEParametersInContext",
"getPMEParametersInContext",
"getParticleParameterOffset",
"getParticleParameters",
]:
continue
assert getattr(force1, attr)() == getattr(force2, attr)(), attr
def _compare_nonbonded_parameters(force1, force2):
assert force1.getNumParticles() == force2.getNumParticles()
for i in range(force1.getNumParticles()):
q1, sig1, eps1 = force1.getParticleParameters(i)
q2, sig2, eps2 = force2.getParticleParameters(i)
assert abs(q2 - q1) < 1e-12 * simtk_unit.elementary_charge
assert abs(sig2 - sig1) < 1e-12 * simtk_unit.nanometer
assert abs(eps2 - eps1) < 1e-12 * simtk_unit.kilojoule_per_mole
def _compare_exceptions(force1, force2):
assert force1.getNumExceptions() == force2.getNumExceptions()
for i in range(force1.getNumExceptions()):
_, _, q1, sig1, eps1 = force1.getExceptionParameters(i)
_, _, q2, sig2, eps2 = force2.getExceptionParameters(i)
assert abs(q2 - q1) < 1e-12 * simtk_unit.elementary_charge ** 2
assert abs(sig2 - sig1) < 1e-12 * simtk_unit.nanometer
assert abs(eps2 - eps1) < 1e-12 * simtk_unit.kilojoule_per_mole
def _get_force(openmm_sys: openmm.System, force_type):
forces = [f for f in openmm_sys.getForces() if type(f) == force_type]
if len(forces) > 1:
raise NotImplementedError("Not yet able to process duplicate forces types")
return forces[0]
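# Hedged usage sketch, assuming two openmm.System objects built from the same
# topology (e.g. one exported by Interchange and one by the OpenFF toolkit);
# this wrapper is illustrative only and not used by the test suite itself.
def _example_compare_nonbonded(system1, system2):
    force1 = _get_force(system1, openmm.NonbondedForce)
    force2 = _get_force(system2, openmm.NonbondedForce)
    _compare_nonbonded_settings(force1, force2)
    _compare_nonbonded_parameters(force1, force2)
    _compare_exceptions(force1, force2)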
| 33.452055 | 86 | 0.692192 |
7944cb9995ef4fd3e731b0447dd930473d2a3924 | 4,779 | py | Python | dolo/numeric/grids.py | Filhagosa/dolo | 384a7b144f925e4edf149abf9828b16e1a0f8798 | [
"BSD-2-Clause"
] | null | null | null | dolo/numeric/grids.py | Filhagosa/dolo | 384a7b144f925e4edf149abf9828b16e1a0f8798 | [
"BSD-2-Clause"
] | null | null | null | dolo/numeric/grids.py | Filhagosa/dolo | 384a7b144f925e4edf149abf9828b16e1a0f8798 | [
"BSD-2-Clause"
] | null | null | null | from functools import reduce
from operator import mul
from quantecon import cartesian
import numpy as np
from numpy import zeros
def prod(l): return reduce(mul, l, 1.0)
from dolo.numeric.misc import mlinspace
class Grid:
def __mul__(self, rgrid):
return cat_grids(self, rgrid)
@property
def nodes(self):
return self.__nodes__
@property
def n_nodes(self):
return self.__nodes__.shape[0]
def node(self, i):
return self.__nodes__[i,:]
class EmptyGrid(Grid):
type = 'empty'
@property
def nodes(self):
return None
@property
def n_nodes(self):
return 0
def node(self, i):
return None
def __add__(self, g):
return g
class PointGrid(Grid):
type = 'point'
def __init__(self, point):
self.point = np.array(point)
@property
def nodes(self):
return None
@property
def n_nodes(self):
return 1
def node(self, i):
return None
class UnstructuredGrid(Grid):
type = 'unstructured'
def __init__(self, nodes):
nodes = np.array(nodes, dtype=float)
self.min = nodes.min(axis=0)
self.max = nodes.max(axis=0)
self.__nodes__ = nodes
self.d = len(self.min)
class CartesianGrid(Grid):
pass
class UniformCartesianGrid(CartesianGrid):
type = 'UniformCartesian'
def __init__(self, min, max, n=[]):
self.d = len(min)
# this should be a tuple
self.min = np.array(min, dtype=float)
self.max = np.array(max, dtype=float)
if len(n) == 0:
            self.n = np.zeros(self.d, dtype=int) + 20  # default: 20 points per dimension
else:
self.n = np.array(n, dtype=int)
# this should be done only on request.
self.__nodes__ = mlinspace(self.min, self.max, self.n)
# def node(i:)
# pass
def __add__(self, g):
if not isinstance(g, UniformCartesianGrid):
raise Exception("Not implemented.")
        n = np.array(tuple(self.n) + tuple(g.n))
        min = np.array(tuple(self.min) + tuple(g.min))
        max = np.array(tuple(self.max) + tuple(g.max))
return UniformCartesianGrid(min, max, n)
def __numba_repr__(self):
return tuple([(self.min[i], self.max[i], self.n[i]) for i in range(self.d)])
class NonUniformCartesianGrid(CartesianGrid):
type = "NonUniformCartesian"
def __init__(self, list_of_nodes):
list_of_nodes = [np.array(l) for l in list_of_nodes]
self.min = [min(l) for l in list_of_nodes]
self.max = [max(l) for l in list_of_nodes]
self.n = np.array([(len(e)) for e in list_of_nodes])
# this should be done only on request.
self.__nodes__ = cartesian(list_of_nodes)
self.list_of_nodes = list_of_nodes # think of a better name
def __add__(self, g):
if not isinstance(g, NonUniformCartesianGrid):
raise Exception("Not implemented.")
return NonUniformCartesianGrid( self.list_of_nodes + g.list_of_nodes )
def __numba_repr__(self):
return tuple([np.array(e) for e in self.list_of_nodes])
class SmolyakGrid(Grid):
type = "Smolyak"
def __init__(self, min, max, mu=2):
from interpolation.smolyak import SmolyakGrid as ISmolyakGrid
min = np.array(min)
max = np.array(max)
self.min = min
self.max = max
self.mu = mu
d = len(min)
sg = ISmolyakGrid(d, mu, lb=min, ub=max)
self.sg = sg
self.d = d
self.__nodes__ = sg.grid
def cat_grids(grid_1, grid_2):
if isinstance(grid_1, EmptyGrid):
return grid_2
if isinstance(grid_1, CartesianGrid) and isinstance(grid_2, CartesianGrid):
min = np.concatenate([grid_1.min, grid_2.min])
max = np.concatenate([grid_1.max, grid_2.max])
n = np.concatenate([grid_1.n, grid_2.n])
        return UniformCartesianGrid(min, max, n)
else:
raise Exception("Not Implemented.")
# compat
def node(grid, i): return grid.node(i)
def nodes(grid): return grid.nodes
def n_nodes(grid): return grid.n_nodes
if __name__ == "__main__":
print("Cartsian Grid")
grid = CartesianGrid([0.1, 0.3], [9, 0.4], [50, 10])
print(grid.nodes)
print(nodes(grid))
print("UnstructuredGrid")
ugrid = UnstructuredGrid([[0.1, 0.3], [9, 0.4], [50, 10]])
print(nodes(ugrid))
print(node(ugrid,0))
print(n_nodes(ugrid))
print("Non Uniform CartesianGrid")
ugrid = NonUniformCartesianGrid([[0.1, 0.3], [9, 0.4], [50, 10]])
print(nodes(ugrid))
print(node(ugrid,0))
print(n_nodes(ugrid))
print("Smolyak Grid")
sg = SmolyakGrid([0.1, 0.2], [1.0, 2.0], 2)
print(nodes(sg))
print(node(sg, 1))
print(n_nodes(sg))
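    # Illustrative addition (not in the original demo): the "+" operator on
    # UniformCartesianGrid concatenates dimensions, so 10 x 20 nodes -> 200 nodes.
    print("Grid product")
    ga = UniformCartesianGrid([0.0], [1.0], [10])
    gb = UniformCartesianGrid([-1.0], [1.0], [20])
    print(n_nodes(ga + gb))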
7944ccc62d11aa457b5528cee727ec1cf2a0e7bc | 8,479 | py | Python | mimic3models/length_of_stay/utils.py | nlhkh/mimic3-benchmarks | MIT
from __future__ import absolute_import
from __future__ import print_function
from mimic3models import metrics
from mimic3models import common_utils
import threading
import os
import numpy as np
import random
def preprocess_chunk(data, ts, discretizer, normalizer=None):
data = [discretizer.transform(X, end=t)[0] for (X, t) in zip(data, ts)]
if normalizer is not None:
data = [normalizer.transform(X) for X in data]
return data
class BatchGen(object):
def __init__(self, reader, partition, discretizer, normalizer,
batch_size, steps, shuffle, return_names=False):
self.reader = reader
self.partition = partition
self.discretizer = discretizer
self.normalizer = normalizer
self.batch_size = batch_size
self.shuffle = shuffle
self.return_names = return_names
if steps is None:
self.n_examples = reader.get_number_of_examples()
self.steps = (self.n_examples + batch_size - 1) // batch_size
else:
self.n_examples = steps * batch_size
self.steps = steps
self.chunk_size = min(1024, self.steps) * batch_size
self.lock = threading.Lock()
self.generator = self._generator()
def _generator(self):
B = self.batch_size
while True:
if self.shuffle:
self.reader.random_shuffle()
remaining = self.n_examples
while remaining > 0:
current_size = min(self.chunk_size, remaining)
remaining -= current_size
ret = common_utils.read_chunk(self.reader, current_size)
Xs = ret["X"]
ts = ret["t"]
ys = ret["y"]
names = ret["name"]
Xs = preprocess_chunk(Xs, ts, self.discretizer, self.normalizer)
(Xs, ys, ts, names) = common_utils.sort_and_shuffle([Xs, ys, ts, names], B)
for i in range(0, current_size, B):
X = common_utils.pad_zeros(Xs[i:i + B])
y = ys[i:i+B]
y_true = np.array(y)
batch_names = names[i:i+B]
batch_ts = ts[i:i+B]
if self.partition == 'log':
y = [metrics.get_bin_log(x, 10) for x in y]
if self.partition == 'custom':
y = [metrics.get_bin_custom(x, 10) for x in y]
y = np.array(y)
if self.return_y_true:
batch_data = (X, y, y_true)
else:
batch_data = (X, y)
if not self.return_names:
yield batch_data
else:
yield {"data": batch_data, "names": batch_names, "ts": batch_ts}
def __iter__(self):
return self.generator
def next(self, return_y_true=False):
with self.lock:
self.return_y_true = return_y_true
return next(self.generator)
def __next__(self):
return self.next()
class BatchGenDeepSupervision(object):
def __init__(self, dataloader, partition, discretizer, normalizer,
batch_size, shuffle, return_names=False):
self.partition = partition
self.batch_size = batch_size
self.shuffle = shuffle
self.return_names = return_names
self._load_per_patient_data(dataloader, discretizer, normalizer)
self.steps = (len(self.data[1]) + batch_size - 1) // batch_size
self.lock = threading.Lock()
self.generator = self._generator()
def _load_per_patient_data(self, dataloader, discretizer, normalizer):
timestep = discretizer._timestep
def get_bin(t):
eps = 1e-6
return int(t / timestep - eps)
N = len(dataloader._data["X"])
Xs = []
ts = []
masks = []
ys = []
names = []
for i in range(N):
X = dataloader._data["X"][i]
cur_ts = dataloader._data["ts"][i]
cur_ys = dataloader._data["ys"][i]
name = dataloader._data["name"][i]
cur_ys = [float(x) for x in cur_ys]
T = max(cur_ts)
nsteps = get_bin(T) + 1
mask = [0] * nsteps
y = [0] * nsteps
for pos, z in zip(cur_ts, cur_ys):
mask[get_bin(pos)] = 1
y[get_bin(pos)] = z
X = discretizer.transform(X, end=T)[0]
if normalizer is not None:
X = normalizer.transform(X)
Xs.append(X)
masks.append(np.array(mask))
ys.append(np.array(y))
names.append(name)
ts.append(cur_ts)
assert np.sum(mask) > 0
assert len(X) == len(mask) and len(X) == len(y)
self.data = [[Xs, masks], ys]
self.names = names
self.ts = ts
def _generator(self):
B = self.batch_size
while True:
if self.shuffle:
N = len(self.data[1])
order = list(range(N))
random.shuffle(order)
tmp_data = [[[None]*N, [None]*N], [None]*N]
tmp_names = [None] * N
tmp_ts = [None] * N
for i in range(N):
tmp_data[0][0][i] = self.data[0][0][order[i]]
tmp_data[0][1][i] = self.data[0][1][order[i]]
tmp_data[1][i] = self.data[1][order[i]]
tmp_names[i] = self.names[order[i]]
tmp_ts[i] = self.ts[order[i]]
self.data = tmp_data
self.names = tmp_names
self.ts = tmp_ts
else:
# sort entirely
Xs = self.data[0][0]
masks = self.data[0][1]
ys = self.data[1]
(Xs, masks, ys, self.names, self.ts) = common_utils.sort_and_shuffle([Xs, masks, ys,
self.names, self.ts], B)
self.data = [[Xs, masks], ys]
for i in range(0, len(self.data[1]), B):
X = self.data[0][0][i:i+B]
mask = self.data[0][1][i:i+B]
y = self.data[1][i:i+B]
names = self.names[i:i+B]
ts = self.ts[i:i+B]
y_true = [np.array(x) for x in y]
y_true = common_utils.pad_zeros(y_true)
y_true = np.expand_dims(y_true, axis=-1)
if self.partition == 'log':
y = [np.array([metrics.get_bin_log(x, 10) for x in z]) for z in y]
if self.partition == 'custom':
y = [np.array([metrics.get_bin_custom(x, 10) for x in z]) for z in y]
X = common_utils.pad_zeros(X) # (B, T, D)
mask = common_utils.pad_zeros(mask) # (B, T)
y = common_utils.pad_zeros(y)
y = np.expand_dims(y, axis=-1)
if self.return_y_true:
batch_data = ([X, mask], y, y_true)
else:
batch_data = ([X, mask], y)
if not self.return_names:
yield batch_data
else:
yield {"data": batch_data, "names": names, "ts": ts}
def __iter__(self):
return self.generator
def next(self, return_y_true=False):
with self.lock:
self.return_y_true = return_y_true
return next(self.generator)
def __next__(self):
return self.next()
def save_results(names, ts, pred, y_true, path, aleatoric=None, epistemic=None):
common_utils.create_directory(os.path.dirname(path))
with open(path, 'w') as f:
if aleatoric is not None and epistemic is not None:
f.write("stay,period_length,prediction,y_true,epistemic,aleatoric,uncertainty\n")
for (name, t, x, y, e, a) in zip(names, ts, pred, y_true, epistemic, aleatoric):
f.write("{},{:.6f},{:.6f},{:.6f},{:.6f},{:.6f},{:.6f}\n".format(name, t, x, y, e, a, e+a))
else:
f.write("stay,period_length,prediction,y_true\n")
for (name, t, x, y) in zip(names, ts, pred, y_true):
f.write("{},{:.6f},{:.6f},{:.6f}\n".format(name, t, x, y))
7944ccffde3d53c525a26db684c6991b75336f39 | 2,663 | py | Python | tests/test_geocoder_autocomplete_api.py | mnogoruk/HerePy | MIT
#!/usr/bin/env python
import os
import time
import unittest
import json
import responses
import herepy
class GeocoderAutoCompleteApiTest(unittest.TestCase):
def setUp(self):
api = herepy.GeocoderAutoCompleteApi("api_key")
self._api = api
def test_initiation(self):
self.assertIsInstance(self._api, herepy.GeocoderAutoCompleteApi)
self.assertEqual(self._api._api_key, "api_key")
self.assertEqual(
self._api._base_url, "https://autosuggest.search.hereapi.com/v1/autosuggest"
)
@responses.activate
def test_addresssuggestion_whensucceed(self):
with open("testdata/models/geocoder_autocomplete.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://autosuggest.search.hereapi.com/v1/autosuggest",
expectedResponse,
status=200,
)
response = self._api.address_suggestion("High", [51.5035, -0.1616], 100)
self.assertTrue(response)
self.assertIsInstance(response, herepy.GeocoderAutoCompleteResponse)
@responses.activate
def test_addresssuggestion_whenerroroccured(self):
with open("testdata/models/geocoder_autocomplete_error.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://autosuggest.search.hereapi.com/v1/autosuggest",
expectedResponse,
status=200,
)
with self.assertRaises(herepy.HEREError):
self._api.address_suggestion("", [51.5035, -0.1616], 100)
@responses.activate
def test_limitresultsbyaddress_whensucceed(self):
with open("testdata/models/geocoder_autocomplete.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://autosuggest.search.hereapi.com/v1/autosuggest",
expectedResponse,
status=200,
)
response = self._api.limit_results_byaddress("Nis", "USA")
self.assertTrue(response)
self.assertIsInstance(response, herepy.GeocoderAutoCompleteResponse)
@responses.activate
def test_limitresultsbyaddress_whenerroroccured(self):
with open("testdata/models/geocoder_autocomplete_error.json", "r") as f:
expectedResponse = f.read()
responses.add(
responses.GET,
"https://autosuggest.search.hereapi.com/v1/autosuggest",
expectedResponse,
status=200,
)
with self.assertRaises(herepy.HEREError):
self._api.limit_results_byaddress("", "")
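# --- Illustrative usage sketch (not part of the original test module) ---
# Outside the mocked tests above, the same call hits the live HERE endpoint;
# network access and a valid API key are assumed, and `api_key` is a placeholder.
def _example_live_request(api_key):
    api = herepy.GeocoderAutoCompleteApi(api_key)
    return api.address_suggestion("High", [51.5035, -0.1616], 100)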
7944cd79f501030f19079f02c86047c998e33be0 | 5,723 | py | Python | apps/files/models.py | iamjdcollins/districtwebsite | MIT
from django.db import models
import apps.common.functions as commonfunctions
from apps.objects.models import Node, File as BaseFile
from apps.taxonomy.models import Language
class File(BaseFile):
PARENT_TYPE = ''
PARENT_URL = ''
URL_PREFIX = ''
HAS_PERMISSIONS = False
title = models.CharField(
max_length=200,
help_text='',
)
file_file = models.FileField(
max_length=2000,
upload_to=commonfunctions.file_upload_to,
verbose_name='File',
help_text='',
)
file_language = models.ForeignKey(
Language,
to_field='language_taxonomy_node',
on_delete=models.PROTECT,
limit_choices_to={
'deleted': False,
},
help_text='',
related_name='files_file_file_language',
)
related_node = models.ForeignKey(
Node,
blank=True,
null=True,
related_name='files_file_node',
editable=False,
on_delete=models.CASCADE,
)
file_file_node = models.OneToOneField(
BaseFile,
db_column='file_file_node',
on_delete=models.CASCADE,
parent_link=True,
editable=False,
)
class Meta:
db_table = 'files_file'
get_latest_by = 'update_date'
permissions = (
('trash_file', 'Can soft delete file'),
('restore_file', 'Can restore file'),
)
verbose_name = 'File'
verbose_name_plural = 'Files'
default_manager_name = 'base_manager'
def force_title(self):
return self.file_language.title
file_name = commonfunctions.file_name
save = commonfunctions.modelsave
delete = commonfunctions.modeltrash
class AudioFile(BaseFile):
PARENT_TYPE = ''
PARENT_URL = ''
URL_PREFIX = ''
HAS_PERMISSIONS = False
title = models.CharField(
max_length=200,
help_text='',
)
file_file = models.FileField(
max_length=2000,
upload_to=commonfunctions.file_upload_to,
verbose_name='File',
help_text='',
)
related_node = models.ForeignKey(
Node,
blank=True,
null=True,
related_name='files_audiofile_node',
editable=False,
on_delete=models.CASCADE,
)
audiofile_file_node = models.OneToOneField(
BaseFile,
db_column='audiofile_file_node',
on_delete=models.CASCADE,
parent_link=True,
editable=False,
)
class Meta:
db_table = 'files_audiofile'
get_latest_by = 'update_date'
permissions = (
('trash_audiofile', 'Can soft delete audio file'),
('restore_audiofile', 'Can restore audio file'),
)
verbose_name = 'Audio File'
verbose_name_plural = 'Audio Files'
default_manager_name = 'base_manager'
def force_title(self):
return self._meta.model_name
file_name = commonfunctions.file_name
save = commonfunctions.modelsave
delete = commonfunctions.modeltrash
class VideoFile(BaseFile):
PARENT_TYPE = ''
PARENT_URL = ''
URL_PREFIX = ''
HAS_PERMISSIONS = False
title = models.CharField(
max_length=200,
help_text='',
)
file_file = models.FileField(
max_length=2000,
upload_to=commonfunctions.file_upload_to,
verbose_name='File',
help_text='',
)
related_node = models.ForeignKey(
Node,
blank=True,
null=True,
related_name='files_videofile_node',
editable=False,
on_delete=models.CASCADE,
)
videofile_file_node = models.OneToOneField(
BaseFile,
db_column='videofile_file_node',
on_delete=models.CASCADE,
parent_link=True,
editable=False,
)
class Meta:
db_table = 'files_videofile'
get_latest_by = 'update_date'
permissions = (
('trash_videofile', 'Can soft delete video file'),
('restore_videofile', 'Can restore video file'),
)
verbose_name = 'Video File'
verbose_name_plural = 'Video Files'
default_manager_name = 'base_manager'
def force_title(self):
return self._meta.model_name
file_name = commonfunctions.file_name
save = commonfunctions.modelsave
delete = commonfunctions.modeltrash
class PrecinctMap(BaseFile):
PARENT_TYPE = ''
PARENT_URL = ''
URL_PREFIX = ''
HAS_PERMISSIONS = False
title = models.CharField(
max_length=200,
help_text='',
)
file_file = models.FileField(
max_length=2000,
upload_to=commonfunctions.file_upload_to,
verbose_name='File',
help_text='',
)
related_node = models.ForeignKey(
Node,
blank=True,
null=True,
related_name='files_precinctmap_node',
editable=False,
on_delete=models.CASCADE,
)
file_file_node = models.OneToOneField(
BaseFile,
db_column='precinctmap_file_node',
on_delete=models.CASCADE,
parent_link=True,
editable=False,
)
class Meta:
db_table = 'files_precinctmap'
get_latest_by = 'update_date'
permissions = (
('trash_precinctmap', 'Can soft delete precinct map'),
            ('restore_precinctmap', 'Can restore precinct map'),
)
verbose_name = 'Precinct Map'
verbose_name_plural = 'Precinct Maps'
default_manager_name = 'base_manager'
def force_title(self):
return self.parent.node_title + ' Map'
file_name = commonfunctions.file_name
save = commonfunctions.modelsave
delete = commonfunctions.modeltrash
7944ce2abf1bb530fcc0798f0948cc9d5989d052 | 2,380 | py | Python | sdk/AsposeEmailCloudSdk/models/object_exists_request.py | aspose-email-cloud/aspose-email-cloud-python | MIT
# coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="object_exists_request.py">
# Copyright (c) 2018-2020 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
from AsposeEmailCloudSdk.models import *
class ObjectExistsRequest(object):
"""
Request model for object_exists operation.
Initializes a new instance.
:param path: File or folder path e.g. '/file.ext' or '/folder'
:type path: str
:param storage_name: Storage name
:type storage_name: str
:param version_id: File version ID
:type version_id: str
"""
def __init__(self, path: str, storage_name: str = None, version_id: str = None):
"""
Request model for object_exists operation.
Initializes a new instance.
:param path: File or folder path e.g. '/file.ext' or '/folder'
:type path: str
:param storage_name: Storage name
:type storage_name: str
:param version_id: File version ID
:type version_id: str
"""
self.path = path
self.storage_name = storage_name
self.version_id = version_id
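# --- Illustrative usage sketch (not part of the original module) ---
# The request object only bundles parameters for the object_exists operation;
# passing it to the SDK's storage API client is not shown here. The path and
# storage name values are made up for illustration.
def _example_build_request():
    return ObjectExistsRequest(path="/folder/attachment.eml",
                               storage_name="First Storage")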
7944ce2e6ffd68fd56af848812007087e5ad284f | 1,267 | py | Python | newdle/vendor/django_mail/backends/locmem.py | linhhvo/newdle | MIT
# The code in here is taken almost verbatim from `django.core.mail.backends.locmem`,
# which is licensed under the three-clause BSD license and is originally
# available on the following URL:
# https://github.com/django/django/blob/stable/2.2.x/django/core/mail/backends/locmem.py
# Credits of the original code go to the Django Software Foundation
# and their contributors.
"""
Backend for test environment.
"""
from newdle.vendor import django_mail
from .base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
"""
An email backend for use during test sessions.
The test connection stores email messages in a dummy outbox,
rather than sending them out on the wire.
The dummy outbox is accessible through the outbox instance attribute.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not hasattr(django_mail, 'outbox'):
django_mail.outbox = []
def send_messages(self, messages):
"""Redirect messages to the dummy outbox"""
msg_count = 0
for message in messages: # .message() triggers header validation
message.message()
django_mail.outbox.append(message)
msg_count += 1
return msg_count
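# --- Illustrative usage sketch (not part of the original module) ---
# Any object exposing a .message() method can be pushed through the backend. A
# tiny stand-in message class keeps the sketch self-contained; it assumes the
# vendored BaseEmailBackend, like Django's, needs no required constructor args.
def _example_locmem_roundtrip():
    class _FakeMessage:
        def message(self):  # called by send_messages() for header validation
            return "raw message"
    backend = EmailBackend()
    sent = backend.send_messages([_FakeMessage()])
    return sent, len(django_mail.outbox)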
7944cee418b9a5dd8204e5bbf9b950e84a92b1ea | 1,055 | py | Python | examples/calibration.py | lanius/vuzixwrapdev | MIT
# -*- coding: utf-8 -*-
import argparse
from time import sleep
import vuzixwrapdev
def calibrate():
with vuzixwrapdev.opening() as device:
print('Calibration start presently. Move your head.')
device.begin_calibrate()
sleep(8)
device.end_calibrate()
print('Finished.')
def set_zero():
with vuzixwrapdev.opening() as device:
print('Set zero. Look forward.')
sleep(3)
device.zero_set()
print('Finished.')
def main():
description = 'Calibrate Vuzix Wrap device'
    parser = argparse.ArgumentParser(description=description)
parser.add_argument('-c', '--calibrate', dest='calibrate',
action='store_true', help='run calibration')
parser.add_argument('-z', '--setzero', dest='set_zero',
action='store_true', help='run set zero')
args = parser.parse_args()
if args.calibrate:
calibrate()
elif args.set_zero:
set_zero()
else:
parser.print_help()
if __name__ == '__main__':
main()
7944cf100faaafd411e176f72a07e82e41524366 | 57,238 | py | Python | Lib/xml/etree/ElementTree.py | neyazahmad007/cpython | PSF-2.0
"""Lightweight XML support for Python.
XML is an inherently hierarchical data format, and the most natural way to
represent it is with a tree. This module has two classes for this purpose:
1. ElementTree represents the whole XML document as a tree and
2. Element represents a single node in this tree.
Interactions with the whole document (reading and writing to/from files) are
usually done on the ElementTree level. Interactions with a single XML element
and its sub-elements are done on the Element level.
Element is a flexible container object designed to store hierarchical data
structures in memory. It can be described as a cross between a list and a
dictionary. Each Element has a number of properties associated with it:
'tag' - a string containing the element's name.
'attributes' - a Python dictionary storing the element's attributes.
'text' - a string containing the element's text content.
'tail' - an optional string containing text after the element's end tag.
And a number of child elements stored in a Python sequence.
To create an element instance, use the Element constructor,
or the SubElement factory function.
You can also use the ElementTree class to wrap an element structure
and convert it to and from XML.
"""
#---------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
#
# ElementTree
# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring", "fromstringlist",
"iselement", "iterparse",
"parse", "ParseError",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring", "tostringlist",
"TreeBuilder",
"VERSION",
"XML", "XMLID",
"XMLParser", "XMLPullParser",
"register_namespace",
]
VERSION = "1.3.0"
import sys
import re
import warnings
import io
import collections
import contextlib
from . import ElementPath
class ParseError(SyntaxError):
"""An error when parsing an XML document.
In addition to its exception value, a ParseError contains
two extra attributes:
'code' - the specific exception code
'position' - the line and column of the error
"""
pass
# --------------------------------------------------------------------
def iselement(element):
"""Return True if *element* appears to be an Element."""
return hasattr(element, 'tag')
class Element:
"""An XML element.
This class is the reference implementation of the Element interface.
An element's length is its number of subelements. That means if you
want to check if an element is truly empty, you should check BOTH
its length AND its text attribute.
The element tag, attribute names, and attribute values can be either
bytes or strings.
*tag* is the element name. *attrib* is an optional dictionary containing
element attributes. *extra* are additional element attributes given as
keyword arguments.
Example form:
<tag attrib>text<child/>...</tag>tail
"""
tag = None
"""The element's name."""
attrib = None
"""Dictionary of the element's attributes."""
text = None
"""
Text before first subelement. This is either a string or the value None.
Note that if there is no text, this attribute may be either
None or the empty string, depending on the parser.
"""
tail = None
"""
Text after this element's end tag, but before the next sibling element's
start tag. This is either a string or the value None. Note that if there
was no text, this attribute may be either None or an empty string,
depending on the parser.
"""
def __init__(self, tag, attrib={}, **extra):
if not isinstance(attrib, dict):
raise TypeError("attrib must be dict, not %s" % (
attrib.__class__.__name__,))
attrib = attrib.copy()
attrib.update(extra)
self.tag = tag
self.attrib = attrib
self._children = []
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__, self.tag, id(self))
def makeelement(self, tag, attrib):
"""Create a new element with the same type.
*tag* is a string containing the element name.
*attrib* is a dictionary containing the element attributes.
Do not call this method, use the SubElement factory function instead.
"""
return self.__class__(tag, attrib)
def copy(self):
"""Return copy of current element.
This creates a shallow copy. Subelements will be shared with the
original tree.
"""
elem = self.makeelement(self.tag, self.attrib)
elem.text = self.text
elem.tail = self.tail
elem[:] = self
return elem
def __len__(self):
return len(self._children)
def __bool__(self):
warnings.warn(
"The behavior of this method will change in future versions. "
"Use specific 'len(elem)' or 'elem is not None' test instead.",
FutureWarning, stacklevel=2
)
return len(self._children) != 0 # emulate old behaviour, for now
def __getitem__(self, index):
return self._children[index]
def __setitem__(self, index, element):
# if isinstance(index, slice):
# for elt in element:
# assert iselement(elt)
# else:
# assert iselement(element)
self._children[index] = element
def __delitem__(self, index):
del self._children[index]
def append(self, subelement):
"""Add *subelement* to the end of this element.
The new element will appear in document order after the last existing
subelement (or directly after the text, if it's the first subelement),
but before the end tag for this element.
"""
self._assert_is_element(subelement)
self._children.append(subelement)
def extend(self, elements):
"""Append subelements from a sequence.
*elements* is a sequence with zero or more elements.
"""
for element in elements:
self._assert_is_element(element)
self._children.extend(elements)
def insert(self, index, subelement):
"""Insert *subelement* at position *index*."""
self._assert_is_element(subelement)
self._children.insert(index, subelement)
def _assert_is_element(self, e):
# Need to refer to the actual Python implementation, not the
# shadowing C implementation.
if not isinstance(e, _Element_Py):
raise TypeError('expected an Element, not %s' % type(e).__name__)
def remove(self, subelement):
"""Remove matching subelement.
Unlike the find methods, this method compares elements based on
identity, NOT ON tag value or contents. To remove subelements by
other means, the easiest way is to use a list comprehension to
select what elements to keep, and then use slice assignment to update
the parent element.
ValueError is raised if a matching element could not be found.
"""
# assert iselement(element)
self._children.remove(subelement)
def getchildren(self):
"""(Deprecated) Return all subelements.
Elements are returned in document order.
"""
warnings.warn(
"This method will be removed in future versions. "
"Use 'list(elem)' or iteration over elem instead.",
DeprecationWarning, stacklevel=2
)
return self._children
def find(self, path, namespaces=None):
"""Find first matching element by tag name or path.
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Return the first matching element, or None if no element was found.
"""
return ElementPath.find(self, path, namespaces)
def findtext(self, path, default=None, namespaces=None):
"""Find text for first matching element by tag name or path.
*path* is a string having either an element tag or an XPath,
*default* is the value to return if the element was not found,
*namespaces* is an optional mapping from namespace prefix to full name.
Return text content of first matching element, or default value if
none was found. Note that if an element is found having no text
content, the empty string is returned.
"""
return ElementPath.findtext(self, path, default, namespaces)
def findall(self, path, namespaces=None):
"""Find all matching subelements by tag name or path.
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Returns list containing all matching elements in document order.
"""
return ElementPath.findall(self, path, namespaces)
def iterfind(self, path, namespaces=None):
"""Find all matching subelements by tag name or path.
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Return an iterable yielding all matching elements in document order.
"""
return ElementPath.iterfind(self, path, namespaces)
def clear(self):
"""Reset element.
This function removes all subelements, clears all attributes, and sets
the text and tail attributes to None.
"""
self.attrib.clear()
self._children = []
self.text = self.tail = None
def get(self, key, default=None):
"""Get element attribute.
Equivalent to attrib.get, but some implementations may handle this a
bit more efficiently. *key* is what attribute to look for, and
*default* is what to return if the attribute was not found.
Returns a string containing the attribute value, or the default if
attribute was not found.
"""
return self.attrib.get(key, default)
def set(self, key, value):
"""Set element attribute.
Equivalent to attrib[key] = value, but some implementations may handle
this a bit more efficiently. *key* is what attribute to set, and
*value* is the attribute value to set it to.
"""
self.attrib[key] = value
def keys(self):
"""Get list of attribute names.
Names are returned in an arbitrary order, just like an ordinary
Python dict. Equivalent to attrib.keys()
"""
return self.attrib.keys()
def items(self):
"""Get element attributes as a sequence.
The attributes are returned in arbitrary order. Equivalent to
attrib.items().
Return a list of (name, value) tuples.
"""
return self.attrib.items()
def iter(self, tag=None):
"""Create tree iterator.
The iterator loops over the element and all subelements in document
order, returning all elements with a matching tag.
If the tree structure is modified during iteration, new or removed
elements may or may not be included. To get a stable set, use the
list() function on the iterator, and loop over the resulting list.
*tag* is what tags to look for (default is to return all elements)
Return an iterator containing all the matching elements.
"""
if tag == "*":
tag = None
if tag is None or self.tag == tag:
yield self
for e in self._children:
yield from e.iter(tag)
# compatibility
def getiterator(self, tag=None):
# Change for a DeprecationWarning in 1.4
warnings.warn(
"This method will be removed in future versions. "
"Use 'elem.iter()' or 'list(elem.iter())' instead.",
PendingDeprecationWarning, stacklevel=2
)
return list(self.iter(tag))
def itertext(self):
"""Create text iterator.
The iterator loops over the element and all subelements in document
order, returning all inner text.
"""
tag = self.tag
if not isinstance(tag, str) and tag is not None:
return
t = self.text
if t:
yield t
for e in self:
yield from e.itertext()
t = e.tail
if t:
yield t
def SubElement(parent, tag, attrib={}, **extra):
"""Subelement factory which creates an element instance, and appends it
to an existing parent.
The element tag, attribute names, and attribute values can be either
bytes or Unicode strings.
    *parent* is the parent element, *tag* is the subelement's name, *attrib* is
    an optional dictionary containing element attributes, *extra* are
additional attributes given as keyword arguments.
"""
attrib = attrib.copy()
attrib.update(extra)
element = parent.makeelement(tag, attrib)
parent.append(element)
return element
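# --- Illustrative usage sketch (not part of the original module) ---
# Minimal round trip with the factories above, written against the installed
# xml.etree.ElementTree package so the snippet is position-independent.
def _example_build_tree():
    import xml.etree.ElementTree as ET
    root = ET.Element("catalog", version="1")
    item = ET.SubElement(root, "item", name="widget")
    item.text = "In stock"
    return ET.tostring(root, encoding="unicode")
    # -> '<catalog version="1"><item name="widget">In stock</item></catalog>'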
def Comment(text=None):
"""Comment element factory.
This function creates a special element which the standard serializer
serializes as an XML comment.
*text* is a string containing the comment string.
"""
element = Element(Comment)
element.text = text
return element
def ProcessingInstruction(target, text=None):
"""Processing Instruction element factory.
This function creates a special element which the standard serializer
    serializes as an XML processing instruction.
*target* is a string containing the processing instruction, *text* is a
string containing the processing instruction contents, if any.
"""
element = Element(ProcessingInstruction)
element.text = target
if text:
element.text = element.text + " " + text
return element
PI = ProcessingInstruction
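# --- Illustrative usage sketch (not part of the original module) ---
# Comment and ProcessingInstruction create special elements that serialize to
# "<!-- ... -->" and "<?...?>" respectively once attached to a tree.
def _example_comment_and_pi():
    import xml.etree.ElementTree as ET
    root = ET.Element("doc")
    root.append(ET.Comment("generated for demo purposes"))
    root.append(ET.ProcessingInstruction("xml-stylesheet", "href='style.xsl'"))
    return ET.tostring(root, encoding="unicode")
    # -> "<doc><!--generated for demo purposes--><?xml-stylesheet href='style.xsl'?></doc>"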
class QName:
"""Qualified name wrapper.
This class can be used to wrap a QName attribute value in order to get
proper namespace handing on output.
*text_or_uri* is a string containing the QName value either in the form
{uri}local, or if the tag argument is given, the URI part of a QName.
*tag* is an optional argument which if given, will make the first
argument (text_or_uri) be interpreted as a URI, and this argument (tag)
be interpreted as a local name.
"""
def __init__(self, text_or_uri, tag=None):
if tag:
text_or_uri = "{%s}%s" % (text_or_uri, tag)
self.text = text_or_uri
def __str__(self):
return self.text
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.text)
def __hash__(self):
return hash(self.text)
def __le__(self, other):
if isinstance(other, QName):
return self.text <= other.text
return self.text <= other
def __lt__(self, other):
if isinstance(other, QName):
return self.text < other.text
return self.text < other
def __ge__(self, other):
if isinstance(other, QName):
return self.text >= other.text
return self.text >= other
def __gt__(self, other):
if isinstance(other, QName):
return self.text > other.text
return self.text > other
def __eq__(self, other):
if isinstance(other, QName):
return self.text == other.text
return self.text == other
# --------------------------------------------------------------------
class ElementTree:
"""An XML element hierarchy.
This class also provides support for serialization to and from
standard XML.
*element* is an optional root element node,
*file* is an optional file handle or file name of an XML file whose
contents will be used to initialize the tree with.
"""
def __init__(self, element=None, file=None):
# assert element is None or iselement(element)
self._root = element # first node
if file:
self.parse(file)
def getroot(self):
"""Return root element of this tree."""
return self._root
def _setroot(self, element):
"""Replace root element of this tree.
This will discard the current contents of the tree and replace it
with the given element. Use with care!
"""
# assert iselement(element)
self._root = element
def parse(self, source, parser=None):
"""Load external XML document into element tree.
*source* is a file name or file object, *parser* is an optional parser
instance that defaults to XMLParser.
ParseError is raised if the parser fails to parse the document.
Returns the root element of the given source document.
"""
close_source = False
if not hasattr(source, "read"):
source = open(source, "rb")
close_source = True
try:
if parser is None:
# If no parser was specified, create a default XMLParser
parser = XMLParser()
if hasattr(parser, '_parse_whole'):
# The default XMLParser, when it comes from an accelerator,
# can define an internal _parse_whole API for efficiency.
# It can be used to parse the whole source without feeding
# it with chunks.
self._root = parser._parse_whole(source)
return self._root
while True:
data = source.read(65536)
if not data:
break
parser.feed(data)
self._root = parser.close()
return self._root
finally:
if close_source:
source.close()
def iter(self, tag=None):
"""Create and return tree iterator for the root element.
The iterator loops over all elements in this tree, in document order.
*tag* is a string with the tag name to iterate over
(default is to return all elements).
"""
# assert self._root is not None
return self._root.iter(tag)
# compatibility
def getiterator(self, tag=None):
# Change for a DeprecationWarning in 1.4
warnings.warn(
"This method will be removed in future versions. "
"Use 'tree.iter()' or 'list(tree.iter())' instead.",
PendingDeprecationWarning, stacklevel=2
)
return list(self.iter(tag))
def find(self, path, namespaces=None):
"""Find first matching element by tag name or path.
Same as getroot().find(path), which is Element.find()
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Return the first matching element, or None if no element was found.
"""
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.find(path, namespaces)
def findtext(self, path, default=None, namespaces=None):
"""Find first matching element by tag name or path.
Same as getroot().findtext(path), which is Element.findtext()
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
        Return the text content of the first matching element, or the
        default value if no element was found.
"""
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.findtext(path, default, namespaces)
def findall(self, path, namespaces=None):
"""Find all matching subelements by tag name or path.
Same as getroot().findall(path), which is Element.findall().
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Return list containing all matching elements in document order.
"""
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.findall(path, namespaces)
def iterfind(self, path, namespaces=None):
"""Find all matching subelements by tag name or path.
Same as getroot().iterfind(path), which is element.iterfind()
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Return an iterable yielding all matching elements in document order.
"""
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.iterfind(path, namespaces)
def write(self, file_or_filename,
encoding=None,
xml_declaration=None,
default_namespace=None,
method=None, *,
short_empty_elements=True):
"""Write element tree to a file as XML.
Arguments:
*file_or_filename* -- file name or a file object opened for writing
*encoding* -- the output encoding (default: US-ASCII)
*xml_declaration* -- bool indicating if an XML declaration should be
added to the output. If None, an XML declaration
is added if encoding IS NOT either of:
US-ASCII, UTF-8, or Unicode
*default_namespace* -- sets the default XML namespace (for "xmlns")
*method* -- either "xml" (default), "html, "text", or "c14n"
*short_empty_elements* -- controls the formatting of elements
that contain no content. If True (default)
they are emitted as a single self-closed
tag, otherwise they are emitted as a pair
of start/end tags
"""
if not method:
method = "xml"
elif method not in _serialize:
raise ValueError("unknown method %r" % method)
if not encoding:
if method == "c14n":
encoding = "utf-8"
else:
encoding = "us-ascii"
enc_lower = encoding.lower()
with _get_writer(file_or_filename, enc_lower) as write:
if method == "xml" and (xml_declaration or
(xml_declaration is None and
enc_lower not in ("utf-8", "us-ascii", "unicode"))):
declared_encoding = encoding
if enc_lower == "unicode":
# Retrieve the default encoding for the xml declaration
import locale
declared_encoding = locale.getpreferredencoding()
write("<?xml version='1.0' encoding='%s'?>\n" % (
declared_encoding,))
if method == "text":
_serialize_text(write, self._root)
else:
qnames, namespaces = _namespaces(self._root, default_namespace)
serialize = _serialize[method]
serialize(write, self._root, qnames, namespaces,
short_empty_elements=short_empty_elements)
def write_c14n(self, file):
# lxml.etree compatibility. use output method instead
return self.write(file, method="c14n")
# --------------------------------------------------------------------
# serialization support
@contextlib.contextmanager
def _get_writer(file_or_filename, encoding):
# returns text write method and release all resources after using
try:
write = file_or_filename.write
except AttributeError:
# file_or_filename is a file name
if encoding == "unicode":
file = open(file_or_filename, "w")
else:
file = open(file_or_filename, "w", encoding=encoding,
errors="xmlcharrefreplace")
with file:
yield file.write
else:
# file_or_filename is a file-like object
# encoding determines if it is a text or binary writer
if encoding == "unicode":
# use a text writer as is
yield write
else:
# wrap a binary writer with TextIOWrapper
with contextlib.ExitStack() as stack:
if isinstance(file_or_filename, io.BufferedIOBase):
file = file_or_filename
elif isinstance(file_or_filename, io.RawIOBase):
file = io.BufferedWriter(file_or_filename)
# Keep the original file open when the BufferedWriter is
# destroyed
stack.callback(file.detach)
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
file = io.BufferedIOBase()
file.writable = lambda: True
file.write = write
try:
# TextIOWrapper uses this methods to determine
# if BOM (for UTF-16, etc) should be added
file.seekable = file_or_filename.seekable
file.tell = file_or_filename.tell
except AttributeError:
pass
file = io.TextIOWrapper(file,
encoding=encoding,
errors="xmlcharrefreplace",
newline="\n")
# Keep the original file open when the TextIOWrapper is
# destroyed
stack.callback(file.detach)
yield file.write
def _namespaces(elem, default_namespace=None):
# identify namespaces used in this tree
# maps qnames to *encoded* prefix:local names
qnames = {None: None}
# maps uri:s to prefixes
namespaces = {}
if default_namespace:
namespaces[default_namespace] = ""
def add_qname(qname):
# calculate serialized qname representation
try:
if qname[:1] == "{":
uri, tag = qname[1:].rsplit("}", 1)
prefix = namespaces.get(uri)
if prefix is None:
prefix = _namespace_map.get(uri)
if prefix is None:
prefix = "ns%d" % len(namespaces)
if prefix != "xml":
namespaces[uri] = prefix
if prefix:
qnames[qname] = "%s:%s" % (prefix, tag)
else:
qnames[qname] = tag # default element
else:
if default_namespace:
# FIXME: can this be handled in XML 1.0?
raise ValueError(
"cannot use non-qualified names with "
"default_namespace option"
)
qnames[qname] = qname
except TypeError:
_raise_serialization_error(qname)
# populate qname and namespaces table
for elem in elem.iter():
tag = elem.tag
if isinstance(tag, QName):
if tag.text not in qnames:
add_qname(tag.text)
elif isinstance(tag, str):
if tag not in qnames:
add_qname(tag)
elif tag is not None and tag is not Comment and tag is not PI:
_raise_serialization_error(tag)
for key, value in elem.items():
if isinstance(key, QName):
key = key.text
if key not in qnames:
add_qname(key)
if isinstance(value, QName) and value.text not in qnames:
add_qname(value.text)
text = elem.text
if isinstance(text, QName) and text.text not in qnames:
add_qname(text.text)
return qnames, namespaces
def _serialize_xml(write, elem, qnames, namespaces,
short_empty_elements, **kwargs):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % text)
elif tag is ProcessingInstruction:
write("<?%s?>" % text)
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_xml(write, e, qnames, None,
short_empty_elements=short_empty_elements)
else:
write("<" + tag)
items = list(elem.items())
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k,
_escape_attrib(v)
))
for k, v in sorted(items): # lexical order
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib(v)
write(" %s=\"%s\"" % (qnames[k], v))
if text or len(elem) or not short_empty_elements:
write(">")
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_xml(write, e, qnames, None,
short_empty_elements=short_empty_elements)
write("</" + tag + ">")
else:
write(" />")
if elem.tail:
write(_escape_cdata(elem.tail))
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta", "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
def _serialize_html(write, elem, qnames, namespaces, **kwargs):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _escape_cdata(text))
elif tag is ProcessingInstruction:
write("<?%s?>" % _escape_cdata(text))
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None)
else:
write("<" + tag)
items = list(elem.items())
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k,
_escape_attrib(v)
))
for k, v in sorted(items): # lexical order
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib_html(v)
# FIXME: handle boolean attributes
write(" %s=\"%s\"" % (qnames[k], v))
write(">")
ltag = tag.lower()
if text:
if ltag == "script" or ltag == "style":
write(text)
else:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None)
if ltag not in HTML_EMPTY:
write("</" + tag + ">")
if elem.tail:
write(_escape_cdata(elem.tail))
def _serialize_text(write, elem):
for part in elem.itertext():
write(part)
if elem.tail:
write(elem.tail)
_serialize = {
"xml": _serialize_xml,
"html": _serialize_html,
"text": _serialize_text,
# this optional method is imported at the end of the module
# "c14n": _serialize_c14n,
}
def register_namespace(prefix, uri):
"""Register a namespace prefix.
The registry is global, and any existing mapping for either the
given prefix or the namespace URI will be removed.
*prefix* is the namespace prefix, *uri* is a namespace uri. Tags and
attributes in this namespace will be serialized with prefix if possible.
ValueError is raised if prefix is reserved or is invalid.
"""
if re.match(r"ns\d+$", prefix):
raise ValueError("Prefix format reserved for internal use")
for k, v in list(_namespace_map.items()):
if k == uri or v == prefix:
del _namespace_map[k]
_namespace_map[uri] = prefix
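# --- Illustrative usage sketch (not part of the original module) ---
# Registering a prefix makes the serializer emit "svg:..." instead of an
# auto-generated "ns0:..." prefix for that namespace URI.
def _example_register_namespace():
    import xml.etree.ElementTree as ET
    ET.register_namespace("svg", "http://www.w3.org/2000/svg")
    elem = ET.Element("{http://www.w3.org/2000/svg}rect", width="10")
    return ET.tostring(elem, encoding="unicode")
    # -> '<svg:rect xmlns:svg="http://www.w3.org/2000/svg" width="10" />'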
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
# xml schema
"http://www.w3.org/2001/XMLSchema": "xs",
"http://www.w3.org/2001/XMLSchema-instance": "xsi",
# dublin core
"http://purl.org/dc/elements/1.1/": "dc",
}
# For tests and troubleshooting
register_namespace._namespace_map = _namespace_map
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _escape_cdata(text):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 character, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
# The following business with carriage returns is to satisfy
# Section 2.11 of the XML specification, stating that
# CR or CR LN should be replaced with just LN
# http://www.w3.org/TR/REC-xml/#sec-line-ends
if "\r\n" in text:
text = text.replace("\r\n", "\n")
if "\r" in text:
text = text.replace("\r", "\n")
#The following four lines are issue 17582
if "\n" in text:
text = text.replace("\n", " ")
if "\t" in text:
text = text.replace("\t", "	")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
def tostring(element, encoding=None, method=None, *,
short_empty_elements=True):
"""Generate string representation of XML element.
All subelements are included. If encoding is "unicode", a string
is returned. Otherwise a bytestring is returned.
*element* is an Element instance, *encoding* is an optional output
    encoding defaulting to US-ASCII, *method* is an optional output method which can
be one of "xml" (default), "html", "text" or "c14n".
Returns an (optionally) encoded string containing the XML data.
"""
stream = io.StringIO() if encoding == 'unicode' else io.BytesIO()
ElementTree(element).write(stream, encoding, method=method,
short_empty_elements=short_empty_elements)
return stream.getvalue()
class _ListDataStream(io.BufferedIOBase):
"""An auxiliary stream accumulating into a list reference."""
def __init__(self, lst):
self.lst = lst
def writable(self):
return True
def seekable(self):
return True
def write(self, b):
self.lst.append(b)
def tell(self):
return len(self.lst)
def tostringlist(element, encoding=None, method=None, *,
short_empty_elements=True):
lst = []
stream = _ListDataStream(lst)
ElementTree(element).write(stream, encoding, method=method,
short_empty_elements=short_empty_elements)
return lst
def dump(elem):
"""Write element tree or element structure to sys.stdout.
This function should be used for debugging only.
*elem* is either an ElementTree, or a single Element. The exact output
format is implementation dependent. In this version, it's written as an
ordinary XML file.
"""
# debugging
if not isinstance(elem, ElementTree):
elem = ElementTree(elem)
elem.write(sys.stdout, encoding="unicode")
tail = elem.getroot().tail
if not tail or tail[-1] != "\n":
sys.stdout.write("\n")
# --------------------------------------------------------------------
# parsing
def parse(source, parser=None):
"""Parse XML document into element tree.
*source* is a filename or file object containing XML data,
*parser* is an optional parser instance defaulting to XMLParser.
Return an ElementTree instance.
"""
tree = ElementTree()
tree.parse(source, parser)
return tree
def iterparse(source, events=None, parser=None):
"""Incrementally parse XML document into ElementTree.
This class also reports what's going on to the user based on the
*events* it is initialized with. The supported events are the strings
"start", "end", "start-ns" and "end-ns" (the "ns" events are used to get
detailed namespace information). If *events* is omitted, only
"end" events are reported.
*source* is a filename or file object containing XML data, *events* is
a list of events to report back, *parser* is an optional parser instance.
Returns an iterator providing (event, elem) pairs.
"""
# Use the internal, undocumented _parser argument for now; When the
# parser argument of iterparse is removed, this can be killed.
pullparser = XMLPullParser(events=events, _parser=parser)
def iterator():
try:
while True:
yield from pullparser.read_events()
# load event buffer
data = source.read(16 * 1024)
if not data:
break
pullparser.feed(data)
root = pullparser._close_and_return_root()
yield from pullparser.read_events()
it.root = root
finally:
if close_source:
source.close()
class IterParseIterator(collections.Iterator):
__next__ = iterator().__next__
it = IterParseIterator()
it.root = None
del iterator, IterParseIterator
close_source = False
if not hasattr(source, "read"):
source = open(source, "rb")
close_source = True
return it
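# --- Illustrative usage sketch (not part of the original module) ---
# iterparse accepts any binary file-like object, so an in-memory BytesIO
# document is enough to demonstrate the (event, elem) stream.
def _example_iterparse():
    import io
    import xml.etree.ElementTree as ET
    data = io.BytesIO(b"<root><item>1</item><item>2</item></root>")
    return [elem.text for _, elem in ET.iterparse(data) if elem.tag == "item"]
    # -> ['1', '2']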
class XMLPullParser:
def __init__(self, events=None, *, _parser=None):
# The _parser argument is for internal use only and must not be relied
# upon in user code. It will be removed in a future release.
# See http://bugs.python.org/issue17741 for more details.
self._events_queue = collections.deque()
self._parser = _parser or XMLParser(target=TreeBuilder())
# wire up the parser for event reporting
if events is None:
events = ("end",)
self._parser._setevents(self._events_queue, events)
def feed(self, data):
"""Feed encoded data to parser."""
if self._parser is None:
raise ValueError("feed() called after end of stream")
if data:
try:
self._parser.feed(data)
except SyntaxError as exc:
self._events_queue.append(exc)
def _close_and_return_root(self):
# iterparse needs this to set its root attribute properly :(
root = self._parser.close()
self._parser = None
return root
def close(self):
"""Finish feeding data to parser.
Unlike XMLParser, does not return the root element. Use
read_events() to consume elements from XMLPullParser.
"""
self._close_and_return_root()
def read_events(self):
"""Return an iterator over currently available (event, elem) pairs.
Events are consumed from the internal event queue as they are
retrieved from the iterator.
"""
events = self._events_queue
while events:
event = events.popleft()
if isinstance(event, Exception):
raise event
else:
yield event
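# --- Editor's sketch (not part of the original module): a hedged, uncalled
# example of the push-style feed()/read_events() loop described above. The byte
# chunks are illustrative assumptions.
def _example_pullparser_usage():
    parser = XMLPullParser(events=("start", "end"))
    for chunk in (b"<root><item>1</item>", b"<item>2</item></root>"):
        parser.feed(chunk)
        for event, elem in parser.read_events():
            print(event, elem.tag)
    # close() flushes the underlying parser; any remaining queued events can
    # still be consumed with read_events() afterwards.
    parser.close()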
def XML(text, parser=None):
"""Parse XML document from string constant.
This function can be used to embed "XML Literals" in Python code.
*text* is a string containing XML data, *parser* is an
optional parser instance, defaulting to the standard XMLParser.
Returns an Element instance.
"""
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
return parser.close()
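# --- Editor's sketch (not part of the original module): a hedged, uncalled
# example of the "XML literal" pattern mentioned in the docstring above.
def _example_xml_literal():
    root = XML("<root><child>text</child></root>")
    return root.find("child").text  # -> "text"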
def XMLID(text, parser=None):
"""Parse XML document from string constant for its IDs.
*text* is a string containing XML data, *parser* is an
optional parser instance, defaulting to the standard XMLParser.
Returns an (Element, dict) tuple, in which the
dict maps element id:s to elements.
"""
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
tree = parser.close()
ids = {}
for elem in tree.iter():
id = elem.get("id")
if id:
ids[id] = elem
return tree, ids
# Parse XML document from string constant. Alias for XML().
fromstring = XML
def fromstringlist(sequence, parser=None):
"""Parse XML document from sequence of string fragments.
    *sequence* is a sequence of string fragments, *parser* is an optional parser
instance, defaulting to the standard XMLParser.
Returns an Element instance.
"""
if not parser:
parser = XMLParser(target=TreeBuilder())
for text in sequence:
parser.feed(text)
return parser.close()
# --------------------------------------------------------------------
class TreeBuilder:
"""Generic element structure builder.
This builder converts a sequence of start, data, and end method
calls to a well-formed element structure.
You can use this class to build an element structure using a custom XML
parser, or a parser for some other XML-like format.
*element_factory* is an optional element factory which is called
to create new Element instances, as necessary.
"""
def __init__(self, element_factory=None):
self._data = [] # data collector
self._elem = [] # element stack
self._last = None # last element
self._tail = None # true if we're after an end tag
if element_factory is None:
element_factory = Element
self._factory = element_factory
def close(self):
"""Flush builder buffers and return toplevel document Element."""
assert len(self._elem) == 0, "missing end tags"
assert self._last is not None, "missing toplevel element"
return self._last
def _flush(self):
if self._data:
if self._last is not None:
text = "".join(self._data)
if self._tail:
assert self._last.tail is None, "internal error (tail)"
self._last.tail = text
else:
assert self._last.text is None, "internal error (text)"
self._last.text = text
self._data = []
def data(self, data):
"""Add text to current element."""
self._data.append(data)
def start(self, tag, attrs):
"""Open new element and return it.
*tag* is the element name, *attrs* is a dict containing element
attributes.
"""
self._flush()
self._last = elem = self._factory(tag, attrs)
if self._elem:
self._elem[-1].append(elem)
self._elem.append(elem)
self._tail = 0
return elem
def end(self, tag):
"""Close and return current Element.
*tag* is the element name.
"""
self._flush()
self._last = self._elem.pop()
assert self._last.tag == tag,\
"end tag mismatch (expected %s, got %s)" % (
self._last.tag, tag)
self._tail = 1
return self._last
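# --- Editor's sketch (not part of the original module): a hedged, uncalled
# example of the start/data/end/close protocol that TreeBuilder implements.
def _example_treebuilder_usage():
    builder = TreeBuilder()
    builder.start("root", {})
    builder.start("child", {"id": "1"})
    builder.data("hello")
    builder.end("child")
    builder.end("root")
    return builder.close()  # the assembled <root> Element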
_sentinel = ['sentinel']
# also see ElementTree and TreeBuilder
class XMLParser:
"""Element structure builder for XML source data based on the expat parser.
*html* are predefined HTML entities (deprecated and not supported),
*target* is an optional target object which defaults to an instance of the
standard TreeBuilder class, *encoding* is an optional encoding string
which if given, overrides the encoding specified in the XML file:
http://www.iana.org/assignments/character-sets
"""
def __init__(self, html=_sentinel, target=None, encoding=None):
if html is not _sentinel:
warnings.warn(
"The html argument of XMLParser() is deprecated",
DeprecationWarning, stacklevel=2)
try:
from xml.parsers import expat
except ImportError:
try:
import pyexpat as expat
except ImportError:
raise ImportError(
"No module named expat; use SimpleXMLTreeBuilder instead"
)
parser = expat.ParserCreate(encoding, "}")
if target is None:
target = TreeBuilder()
# underscored names are provided for compatibility only
self.parser = self._parser = parser
self.target = self._target = target
self._error = expat.error
self._names = {} # name memo cache
# main callbacks
parser.DefaultHandlerExpand = self._default
if hasattr(target, 'start'):
parser.StartElementHandler = self._start
if hasattr(target, 'end'):
parser.EndElementHandler = self._end
if hasattr(target, 'data'):
parser.CharacterDataHandler = target.data
# miscellaneous callbacks
if hasattr(target, 'comment'):
parser.CommentHandler = target.comment
if hasattr(target, 'pi'):
parser.ProcessingInstructionHandler = target.pi
# Configure pyexpat: buffering, new-style attribute handling.
parser.buffer_text = 1
parser.ordered_attributes = 1
parser.specified_attributes = 1
self._doctype = None
self.entity = {}
try:
self.version = "Expat %d.%d.%d" % expat.version_info
except AttributeError:
pass # unknown
def _setevents(self, events_queue, events_to_report):
# Internal API for XMLPullParser
# events_to_report: a list of events to report during parsing (same as
        #     the *events* of XMLPullParser's constructor).
# events_queue: a list of actual parsing events that will be populated
# by the underlying parser.
#
parser = self._parser
append = events_queue.append
for event_name in events_to_report:
if event_name == "start":
parser.ordered_attributes = 1
parser.specified_attributes = 1
def handler(tag, attrib_in, event=event_name, append=append,
start=self._start):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
elif event_name == "end":
def handler(tag, event=event_name, append=append,
end=self._end):
append((event, end(tag)))
parser.EndElementHandler = handler
elif event_name == "start-ns":
def handler(prefix, uri, event=event_name, append=append):
append((event, (prefix or "", uri or "")))
parser.StartNamespaceDeclHandler = handler
elif event_name == "end-ns":
def handler(prefix, event=event_name, append=append):
append((event, None))
parser.EndNamespaceDeclHandler = handler
else:
raise ValueError("unknown event %r" % event_name)
def _raiseerror(self, value):
err = ParseError(value)
err.code = value.code
err.position = value.lineno, value.offset
raise err
def _fixname(self, key):
# expand qname, and convert name string to ascii, if possible
try:
name = self._names[key]
except KeyError:
name = key
if "}" in name:
name = "{" + name
self._names[key] = name
return name
def _start(self, tag, attr_list):
# Handler for expat's StartElementHandler. Since ordered_attributes
# is set, the attributes are reported as a list of alternating
# attribute name,value.
fixname = self._fixname
tag = fixname(tag)
attrib = {}
if attr_list:
for i in range(0, len(attr_list), 2):
attrib[fixname(attr_list[i])] = attr_list[i+1]
return self.target.start(tag, attrib)
def _end(self, tag):
return self.target.end(self._fixname(tag))
def _default(self, text):
prefix = text[:1]
if prefix == "&":
# deal with undefined entities
try:
data_handler = self.target.data
except AttributeError:
return
try:
data_handler(self.entity[text[1:-1]])
except KeyError:
from xml.parsers import expat
err = expat.error(
"undefined entity %s: line %d, column %d" %
(text, self.parser.ErrorLineNumber,
self.parser.ErrorColumnNumber)
)
err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
err.lineno = self.parser.ErrorLineNumber
err.offset = self.parser.ErrorColumnNumber
raise err
elif prefix == "<" and text[:9] == "<!DOCTYPE":
self._doctype = [] # inside a doctype declaration
elif self._doctype is not None:
# parse doctype contents
if prefix == ">":
self._doctype = None
return
text = text.strip()
if not text:
return
self._doctype.append(text)
n = len(self._doctype)
if n > 2:
type = self._doctype[1]
if type == "PUBLIC" and n == 4:
name, type, pubid, system = self._doctype
if pubid:
pubid = pubid[1:-1]
elif type == "SYSTEM" and n == 3:
name, type, system = self._doctype
pubid = None
else:
return
if hasattr(self.target, "doctype"):
self.target.doctype(name, pubid, system[1:-1])
elif self.doctype != self._XMLParser__doctype:
# warn about deprecated call
self._XMLParser__doctype(name, pubid, system[1:-1])
self.doctype(name, pubid, system[1:-1])
self._doctype = None
def doctype(self, name, pubid, system):
"""(Deprecated) Handle doctype declaration
*name* is the Doctype name, *pubid* is the public identifier,
and *system* is the system identifier.
"""
warnings.warn(
"This method of XMLParser is deprecated. Define doctype() "
"method on the TreeBuilder target.",
DeprecationWarning,
)
# sentinel, if doctype is redefined in a subclass
__doctype = doctype
def feed(self, data):
"""Feed encoded data to parser."""
try:
self.parser.Parse(data, 0)
except self._error as v:
self._raiseerror(v)
def close(self):
"""Finish feeding data to parser and return element structure."""
try:
self.parser.Parse("", 1) # end of data
except self._error as v:
self._raiseerror(v)
try:
close_handler = self.target.close
except AttributeError:
pass
else:
return close_handler()
finally:
# get rid of circular references
del self.parser, self._parser
del self.target, self._target
# Import the C accelerators
try:
# Element is going to be shadowed by the C implementation. We need to keep
    # the Python version of it accessible for some "creative" uses by external code
# (see tests)
_Element_Py = Element
# Element, SubElement, ParseError, TreeBuilder, XMLParser
from _elementtree import *
except ImportError:
pass
| 34.43923 | 79 | 0.580873 |
7944d04fe14c2d01aed24ce470edd4acada192ec | 5,035 | py | Python | cellcounter/statistics/tests.py | oghm2/cellcounter | e39f07cc5fc2f78f509efaaac308efa5fb35f3ef | [
"MIT"
] | 2 | 2016-11-02T01:01:36.000Z | 2018-09-07T19:10:41.000Z | cellcounter/statistics/tests.py | oghm2/cellcounter | e39f07cc5fc2f78f509efaaac308efa5fb35f3ef | [
"MIT"
] | 76 | 2015-01-16T18:46:06.000Z | 2022-03-21T22:16:10.000Z | cellcounter/statistics/tests.py | oghm2/cellcounter | e39f07cc5fc2f78f509efaaac308efa5fb35f3ef | [
"MIT"
] | 3 | 2020-05-31T11:25:45.000Z | 2020-07-27T12:06:53.000Z | from importlib import import_module
from django.conf import settings
from django.urls import reverse
from django.contrib.auth.models import User
from django.core.cache import cache
from django.test import TestCase, RequestFactory
from django.shortcuts import render
from rest_framework import status
from rest_framework.test import APIRequestFactory, APITestCase, force_authenticate
from .views import ListCreateCountInstanceAPI
from .middleware import StatsSessionMiddleware
from .models import CountInstance
factory = APIRequestFactory()
view = ListCreateCountInstanceAPI.as_view()
class TestStatsMiddleware(TestCase):
def setUp(self):
self.request = RequestFactory().get(reverse('create-count-instance'))
self.request.session = {}
self.request.COOKIES = {}
self.mw = StatsSessionMiddleware()
def test_empty_session(self):
self.mw.process_request(self.request)
self.assertIsNotNone(self.request.session.session_key)
def test_no_key_session(self):
self.mw.process_request(self.request)
self.assertIsNotNone(self.request.session.session_key)
def test_key_session(self):
"""Don't create new session id when one is already set
"""
session_engine = import_module(settings.SESSION_ENGINE)
SessionStore = session_engine.SessionStore
session_id = SessionStore(None)
session_id.save()
self.request.COOKIES['sessionid'] = session_id.session_key
self.mw.process_request(self.request)
self.assertEqual(session_id.session_key, self.request.session.session_key)
class TestCountInstanceAPI(APITestCase):
def setUp(self):
self.user = User.objects.create_user('basic', '[email protected]', 'basic')
self.staff_user = User.objects.create_superuser('permitted',
'[email protected]',
'password')
self.url = reverse('create-count-instance')
self.data = {'count_total': 100}
cache.clear()
def test_create_permissions(self):
request = factory.post('/', {'count_total': 100}, format='json')
StatsSessionMiddleware().process_request(request)
response = view(request)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
request = factory.post('/', {'count_total': 100}, format='json')
StatsSessionMiddleware().process_request(request)
force_authenticate(request, user=self.staff_user)
response = view(request)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
request = factory.post('/', {'count_total': 100}, format='json')
StatsSessionMiddleware().process_request(request)
force_authenticate(request, user=self.user)
response = view(request)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_safe_permissions(self):
request = factory.get('/')
force_authenticate(request, user=self.staff_user)
response = view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
request = factory.head('/')
force_authenticate(request, user=self.staff_user)
response = view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
request = factory.options('/')
force_authenticate(request, user=self.staff_user)
response = view(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_anonymous_permissions(self):
request = factory.get('/')
response = view(request)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
request = factory.head('/')
response = view(request)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
request = factory.options('/')
response = view(request)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_authenticated_get(self):
instance = CountInstance.objects.create(user=self.user,
session_id='a',
count_total=100,
ip_address="127.0.0.1")
request = factory.get('/')
force_authenticate(request, user=self.staff_user)
response = view(request)
response.render()
self.assertEqual(response.data[0]['session_id'], instance.session_id)
self.assertEqual(response.data[0]['count_total'], instance.count_total)
self.assertEqual(response.data[0]['ip_address'], instance.ip_address)
def test_ratelimit_exceeded(self):
request = factory.post('/', {'count_total': 100}, format='json')
StatsSessionMiddleware().process_request(request)
for dummy in range(2):
response = view(request)
self.assertEqual(429, response.status_code)
| 40.604839 | 83 | 0.667527 |
7944d077e44d461c33053799095ba11bccd0f14c | 948 | py | Python | main/utils/exifdata.py | drhoet/photo-workflow | 4d1e6be82a71fec34e37ddf4096c46d871b24b66 | [
"MIT"
] | null | null | null | main/utils/exifdata.py | drhoet/photo-workflow | 4d1e6be82a71fec34e37ddf4096c46d871b24b66 | [
"MIT"
] | null | null | null | main/utils/exifdata.py | drhoet/photo-workflow | 4d1e6be82a71fec34e37ddf4096c46d871b24b66 | [
"MIT"
] | null | null | null | from datetime import datetime, timezone
from .datetime import has_timezone
def parse_file_filemodifytime(dt_str: str) -> datetime:
return datetime.strptime(dt_str, "%Y:%m:%d %H:%M:%S%z")
def parse_exif_datetimeoriginal(dt_str: str) -> datetime:
""" Returns a *naive* datetime (no timezone information) """
return datetime.strptime(dt_str, "%Y:%m:%d %H:%M:%S")
def parse_exif_offsettime(et_str: str) -> timezone:
dt = datetime.strptime(et_str.replace(":", ""), "%z")
if has_timezone(dt):
return dt.tzinfo
else:
return None
def format_exif_datetimeoriginal(dt: datetime) -> str:
return dt.strftime("%Y:%m:%d %H:%M:%S")
def format_exif_offsettime(dt: datetime) -> str:
tz_str = dt.strftime("%z") # this is in the format +0400
return tz_str[0:3] + ":" + tz_str[3:]
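# Editor's note (not in the original file): a hedged illustration of the EXIF
# string round-trip these helpers implement, e.g.
#   parse_exif_datetimeoriginal("2021:06:05 14:30:00")  -> naive datetime(2021, 6, 5, 14, 30)
#   parse_exif_offsettime("+02:00")                      -> timezone(timedelta(hours=2))
#   format_exif_offsettime(aware_dt)                     -> "+02:00"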
def format_file_modify_date(dt: datetime) -> str:
return format_exif_datetimeoriginal(dt) + format_exif_offsettime(dt) | 29.625 | 72 | 0.681435 |
7944d1349651c37b86e4dabf350de568a122b51e | 1,136 | py | Python | setup.py | ska-sa/scape | 0909436b1d5ab0b068106e0479e95a3089e1c840 | [
"BSD-3-Clause"
] | null | null | null | setup.py | ska-sa/scape | 0909436b1d5ab0b068106e0479e95a3089e1c840 | [
"BSD-3-Clause"
] | 1 | 2021-06-21T23:30:52.000Z | 2021-06-22T12:49:30.000Z | setup.py | ska-sa/scape | 0909436b1d5ab0b068106e0479e95a3089e1c840 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(name="scape",
description="Karoo Array Telescope single-dish analysis package'",
author="Ludwig Schwardt",
author_email="[email protected]",
packages=find_packages(),
url='https://github.com/ska-sa/scape',
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: Other/Proprietary License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering :: Astronomy"],
platforms=["OS Independent"],
keywords="meerkat ska",
zip_safe=False,
setup_requires=['katversion'],
use_katversion=True,
test_suite="nose.collector",
install_requires=[
"numpy",
"scipy",
"katpoint",
"scikits.fitting",
"six"
],
tests_require=[
"nose",
"coverage",
"nosexcover",
"unittest2"
])
| 30.702703 | 73 | 0.573944 |
7944d1385986fd9d0e2ae134793a09ec48aa57ab | 1,641 | py | Python | leaf_audio/convolution_test.py | plj1280/leaf-audio | 3a3495670aeb72509ed9498e5c8f1e8809249aeb | [
"Apache-2.0"
] | 377 | 2021-03-02T16:45:16.000Z | 2022-03-31T12:28:07.000Z | leaf_audio/convolution_test.py | plj1280/leaf-audio | 3a3495670aeb72509ed9498e5c8f1e8809249aeb | [
"Apache-2.0"
] | 17 | 2021-03-05T05:56:20.000Z | 2022-03-27T23:17:02.000Z | leaf_audio/convolution_test.py | isabella232/leaf-audio | a509bd2149105c94147bab22ab764435d29a8b20 | [
"Apache-2.0"
] | 46 | 2021-03-03T20:17:00.000Z | 2022-03-12T16:42:54.000Z | # coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""For FFT-based 1D convolutions."""
from absl.testing import parameterized
from leaf_audio import convolution
import tensorflow.compat.v2 as tf
class ConvTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super().setUp()
tf.random.set_seed(0)
@parameterized.parameters([[1000, 32, 1, 4], [1000, 256, 4, 8]])
def test_fft_conv1d(self, seq_len, filter_len, batch_size, num_filters):
inputs = tf.sort(tf.random.normal(shape=(batch_size, seq_len, 1)), axis=1)
filters = tf.random.normal(shape=(filter_len, 1, num_filters))
target = tf.nn.convolution(inputs, filters, padding='SAME')
outputs, filters_l1 = convolution.fft_conv1d(inputs, filters)
self.assertEqual(outputs.shape, target.shape)
self.assertEqual(outputs.shape, (batch_size, seq_len, num_filters))
self.assertEqual(filters_l1.shape, ())
k = filter_len // 2
self.assertAllClose(outputs[:, k:-k, :], target[:, k:-k, :], atol=0.01)
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
| 33.489796 | 78 | 0.724558 |
7944d1d7fe7b4bfc3755d8c22c678c787611a1a6 | 2,927 | py | Python | pyxivcompanion/token.py | marimelon/pyxivcompanion | c0d4cbc81a61f6e80508e32b012c6ef11b49be93 | [
"MIT"
] | null | null | null | pyxivcompanion/token.py | marimelon/pyxivcompanion | c0d4cbc81a61f6e80508e32b012c6ef11b49be93 | [
"MIT"
] | null | null | null | pyxivcompanion/token.py | marimelon/pyxivcompanion | c0d4cbc81a61f6e80508e32b012c6ef11b49be93 | [
"MIT"
] | null | null | null | import uuid
from pydantic import BaseModel
from .account import Account, LoginObj
from .character import Character
from .config import Config
from .login import Login
from .request import CompanionRequest
from .response import SightResponseError, SightResponseLoginCharacter
class Token(BaseModel):
userId: str
token: str
salt: str
region: str
cid: str
character_name: str
world: str
    currentWorld: str
@staticmethod
async def get_character_info(token: str, region: str) -> SightResponseLoginCharacter:
req = CompanionRequest(url=f'{region}{Config.SIGHT_PATH}login/character',
RequestID=str(uuid.uuid1()).upper(),
Token=token)
res = await req.get()
if not res.status == 200:
raise SightResponseError(res)
data = await res.json()
return SightResponseLoginCharacter(**data)
async def refresh(self, sqex_id: str = None, sqex_pass: str = None, otp: str = None):
res_data = await Account.request_token(self.userId)
if res_data.region == "":
if sqex_id is None or sqex_pass is None:
raise Exception('sqex_id and sqex_password required.')
login = await Account.login(sqex_id=sqex_id, sqex_pass=sqex_pass, otp=otp,
userId=self.userId,
token=res_data.token, salt=res_data.salt)
region = await login.get_region(self.cid)
character_info = await self.get_character_info(login.token, region)
else:
self.token = res_data.token
# /login/character
await Login.get_character(token=self)
# /character/worlds
        data, res = await Character.get_worlds(token=self)
self.currentWorld = data.currentWorld
@classmethod
async def create_new_token(cls, cid: str, sqex_id: str, sqex_pass: str, otp: str = None):
login = await Account.login(sqex_id, sqex_pass, otp)
return await cls.create_new_token_from_loginobj(cid=cid, login=login)
@classmethod
async def create_new_token_from_loginobj(cls, cid: str, login: LoginObj):
region = await login.get_region(cid)
character_info = await cls.get_character_info(login.token, region)
# /login/character
await login.login_character()
# /character/login-status
login_status = await login.character_login_status()
return cls(userId=login.userId,
token=login.token,
salt=login.salt,
region=region,
cid=cid,
character_name=character_info.character.name,
world=character_info.character.world,
currentWorld=login_status.currentWorld)
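# Editor's note (not in the original file): a hedged sketch of the intended call
# flow, assuming the surrounding pyxivcompanion API behaves as the methods above
# suggest (names below are illustrative):
#
#   token = await Token.create_new_token(cid, sqex_id, sqex_pass, otp)
#   ...                                                  # token goes stale over time
#   await token.refresh(sqex_id=sqex_id, sqex_pass=sqex_pass)  # re-logs in only if the region lookup fails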
| 37.050633 | 94 | 0.612231 |
7944d2c6d9096471a9dc8d0dfe12131e104c0119 | 51,117 | py | Python | tstools/measures.py | parevalo/measures_collector | 056f2b393fcca9811718a6265c32195ca9b9d79a | [
"MIT"
] | 3 | 2020-04-08T22:27:34.000Z | 2021-07-30T04:09:34.000Z | tstools/measures.py | parevalo/measures_collector | 056f2b393fcca9811718a6265c32195ca9b9d79a | [
"MIT"
] | 17 | 2019-04-02T19:21:49.000Z | 2019-07-25T18:02:42.000Z | tstools/measures.py | parevalo/measures_collector | 056f2b393fcca9811718a6265c32195ca9b9d79a | [
"MIT"
] | 8 | 2019-04-04T08:52:19.000Z | 2022-03-02T00:23:06.000Z | # Classes for individual projects
import tstools.utils as utils
import tstools.sql as sql
import tstools.sheets as sheets
import tstools.leaflet_tools as lft
import tstools.ccd as ccd_tools
import ipyleaflet
import os, datetime, sqlite3, time
import pandas as pd
import tstools.plots as plots
import ipywidgets as widgets
import shapely
# Sample interpretation to collect training data for MEaSUREs
class measures(object):
def __init__(self):
measures.sheet = None
measures.sheet2 = None
measures.band_index1 = 4
measures.band_index2 = 4
measures.pyccd_flag = False
measures.pyccd_flag2 = False
conn = sqlite3.connect(measures.dbPath)
measures.current_id = measures.current_id
measures.c = conn.cursor()
measures.minv = 0
measures.maxv = 6000
measures.b1 = 'SWIR1'
measures.b2 = 'NIR'
measures.b3 = 'RED'
####### Starting Variables #######
pyccd_flag = False
pyccd_flag2 = False
current_band = ''
band_index1 = 4
band_index2 = 4
click_col = ''
point_color = ['#43a2ca']
click_df = pd.DataFrame()
click_geojson = ''
box_geojson = ''
click_trainbox = ''
sample_col = ''
sample_df = pd.DataFrame()
samplept_geojson = ''
PyCCDdf = pd.DataFrame()
band_list = ['BLUE', 'GREEN', 'RED', 'NIR', 'SWIR1', 'SWIR2', 'BRIGHTNESS',
'GREENNESS', 'WETNESS', 'NDVI']
doy_range = [1, 365]
step = 1 #in years
current_id = 0
####### Database #######
dbPath = os.getcwd() + '/measures_database'
command = '''CREATE TABLE measures
(id text, lat text, lon text, year1 text, year2 text,
coverType text, class1 text, water text,
bare text, albedo text, use text, height text,
transport text, impervious text, density text,
herbType text, shrubType text, phenology text, leafType text,
location text, vegType text, conf text, notes1 text, segType text,
direction text, changeAgent text, confCA text, ca_other text,
seg_notes text, breakYear text, breakRange1 text,
breakRange2 text)'''
conn = sql.make_db(dbPath, command)
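    # Editor's note (not in the original source): each saved interpretation maps to
    # one row of this table, matching the tuple built in save_sample() below (the
    # sqlite insert there is currently commented out in favour of Google Sheets).
    # A hedged sketch of reading the local copy back, for example:
    #   rows = sqlite3.connect(dbPath).execute(
    #       "SELECT id, lat, lon, class1, year1, year2 FROM measures").fetchall()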
###### Widgets ######
# Sliders
years = plots.make_range_slider([1990, 1991], 1990, 2018, 1, 'Years:')
break_years = plots.make_range_slider([1990, 1991], 1990, 2018, 1,
'Confidence:', disabled=True)
break_year = plots.make_slider(1990, 1991, 2018, 1, 'Year:', disabled=True)
confidence = plots.make_slider(0, 0, 3, 1, 'Confidence:')
ca_confidence = plots.make_slider(0, 0, 3, 1, '', disabled=True)
b_ca_confidence = plots.make_slider(0, 0, 3, 1, '', disabled=True)
ylim = plots.make_range_slider([0, 4000], -10000, 10000, 500, 'YLim:')
xlim = plots.make_range_slider([2000, 2020], 1984, 2020, 1, 'XLim:')
ylim2 = plots.make_range_slider([0, 4000], -10000, 10000, 500, 'YLim:')
xlim2 = plots.make_range_slider([2000, 2020], 1984, 2020, 1, 'XLim:')
# Dropdown boxes
drop1 = plots.make_drop('Persistant Ice?', ['Persistant Ice?', 'Yes','No'],
'Decision 2')
drop2 = plots.make_drop('Decision 3', ['Decision 3'], 'Decision 3')
drop3 = plots.make_drop('Decision 4', ['Decision 4'], 'Decision 4')
drop4 = plots.make_drop('Decision 5', ['Decision 5'], 'Decision 5')
drop5 = plots.make_drop('Decision 6', ['Decision 6'], 'Decision 6')
drop6 = plots.make_drop('Decision 7', ['Decision 7'], 'Decision 7')
drop7 = plots.make_drop('Decision 8', ['Decision 8'], 'Decision 8')
drop8 = plots.make_drop('Decision 9', ['Decision 9'], 'Decision 9')
drop9 = plots.make_drop('Select type', ['Select type', 'Stable',
'Transitional'], '')
drop0 = plots.make_drop('Dominant or Secondary?', ['Dominant or Secondary?',
'Dominant', 'Secondary'],
'Decision 1')
band_selector1 = plots.make_drop('SWIR1', band_list, 'Select band')
band_selector2 = plots.make_drop('SWIR1', band_list, 'Select band')
image_band_1 = plots.make_drop('SWIR1', band_list, 'Red:')
image_band_2 = plots.make_drop('NIR', band_list, 'Green:')
image_band_3 = plots.make_drop('RED', band_list, 'Blue:')
# Checkbox
color_check = plots.make_checkbox(False, 'Color DOY', False)
break_check = plots.make_checkbox(False, 'Land Cover Change in TS?', False)
click_train = plots.make_checkbox(False, 'Collect TS training', False)
# Select multiple
_veg_selector = ['Select a modifier', 'None', 'Cropland', 'Plantation',
'Wetland', 'Riparian/Flood', 'Mangrove', 'Trees/Shrubs Present']
veg_selector = plots.make_selector(['Select a modifier'], _veg_selector,
'Veg Modifier:', disabled=True)
_change_selector = ['None', 'Deforestation/Logging', 'Fire', 'Insect damage',
'Urban Dev.', 'Flooding', 'Decline/Degradation',
'Regrowth', 'Riparian/Water shift', 'Drought','Unknown',
'Other (Specify)']
change_selector = plots.make_selector(['None'], _change_selector, '',
disabled=True)
_b_change_selector = ['None', 'Deforestation/Logging', 'Fire',
'Insect damage', 'Urban Dev.', 'Flooding',
'Decline/Degradation', 'Regrowth','Drought',
'Riparian/Water shift', 'Other (Specify)']
b_change_selector = plots.make_selector(['None'], _b_change_selector, '',
disabled=True)
_direction = ['NA', 'Unknown', 'Veg Increase', 'Veg Decrease',
'Water Increase', 'Water Decrease', 'Bare Increase',
'Bare Decrease', 'Urban Increase', 'Urban Decrease',
'Albedo Increase', 'Albedo Decrease']
direction = plots.make_selector(['NA'], _direction, '', disabled=True)
_b_direction = ['NA', 'Unknown', 'Veg Increase', 'Veg Decrease',
'Water Increase', 'Water Decrease', 'Bare Increase',
'Bare Decrease', 'Urban Increase', 'Urban Decrease',
'Albedo Increase', 'Albedo Decrease']
b_direction = plots.make_selector(['NA'], _b_direction, '', disabled=True)
# Text boxes
change_other = plots.make_text('Specify other', 'Specify other', 'Other:',
disabled=True)
b_change_other = plots.make_text('Specify other', 'Specify other', 'Other:',
disabled=True)
notes = plots.make_text_large('Enter any useful or interesting information \
about the land cover of this sample', '',
'Notes', layout=widgets.Layout())
notes_seg_trend = plots.make_text_large('Enter any useful or interesting \
information about the Time Trend \
of this sample',
'',
'Notes', layout=widgets.Layout())
notes_break = plots.make_text_large('Enter any useful or interesting \
information about the Break in the \
time series', '', 'Notes',
layout=widgets.Layout(), disabled=True) # TODO: save this
spreadsheet = plots.make_text('Google Spreadsheet Credential JSON',
'Google Spreadsheet Credential JSON',
'Credentials:')
spreadName = plots.make_text('Google Spreadsheet Name',
'Google Spreadsheet Name', 'SS Name:')
sampleWidget = plots.make_text('Path to sample feature collection',
'Path to sample feature collection',
'Path:')
stretch_min = plots.make_text_float(0, 0, 'Min:')
stretch_max = plots.make_text_float(6000, 6000, 'Max:')
zoom_box = plots.make_text_float(18, 18, 'Zoom:')
idBox = plots.make_text('0', '0', 'ID:')
go_to_lat = plots.make_text('0', 'Lat','')
go_to_lon = plots.make_text('0', 'Lon','')
go_to_lat.layout=widgets.Layout(width='20%')
go_to_lon.layout=widgets.Layout(width='20%')
# Buttons
validate = plots.make_button(False, 'Validate', icon='check')
save_button = plots.make_button(False, 'Save Segment LC', icon='')
b_save_button = plots.make_button(False, 'Save Break', icon='', disabled=True) #TODO: does nothing
load_button = plots.make_button(False, 'Load', icon='')
toggle_pyccd_button = plots.make_button(False, 'Clear Pyccd 1', icon='')
toggle_pyccd_button2 = plots.make_button(False, 'Clear Pyccd 2', icon='')
return_button = plots.make_button(False, 'Return to Sample', icon='')
next_pt = plots.make_button(False, 'Next point', icon='')
previous_pt = plots.make_button(False, 'Previous point', icon='')
pyccd_button = plots.make_button(False, 'Run PyCCD 1', icon='')
pyccd_button2 = plots.make_button(False, 'Run PyCCD 2', icon='')
clear_layers = plots.make_button(False, 'Clear Map', icon='')
# Validate
valid = plots.make_valid(False, 'Not Saved', '')
b_valid = plots.make_valid(False, 'Not Saved', '') # TODO: DOes nothing/
valid_load = plots.make_valid(False, 'Not Loaded', '')
# HTML
pt_message = plots.make_html('<b>Current ID:</b>')
time_label = plots.make_html('')
sample_coord_message = plots.make_html('Sample Lat, Lon: ')
click_coord_message = plots.make_html('Clicked Lat, Lon: ')
selected_label = plots.make_html('ID of selected point')
hover_label = plots.make_html('Test Value')
text_brush = plots.make_html('Selected year range:')
kml_link = plots.make_html('KML:')
error_label = plots.make_html('Load a point')
###### Plots ######
# Scales
# Dates
lc1_x = plots.make_bq_scale('date', datetime.date(xlim.value[0], 2, 1),
datetime.date(xlim.value[1], 1, 1))
lc1_x2 = plots.make_bq_scale('date', datetime.date(xlim.value[0], 2, 1),
datetime.date(xlim.value[1], 1, 1))
# DOY
lc1_x3 = plots.make_bq_scale('linear', 0, 365)
# Reflectance
lc2_y = plots.make_bq_scale('linear', ylim.value[0], ylim.value[1])
lc2_y2 = plots.make_bq_scale('linear', ylim.value[0], ylim.value[1])
# plots
lc2 = plots.make_bq_plot('scatter', [], [], {'x': lc1_x, 'y': lc2_y},
[1, 1],
{'click': 'select', 'hover': 'tooltip'},
{'opacity': 1.0, 'fill': 'DarkOrange',
'stroke': 'Red'},
{'opacity': 0.5}, display_legend=True,
labels=['Sample point'])
lc3 = plots.make_bq_plot('scatter', [], [], {'x': lc1_x2, 'y': lc2_y2},
[1, 1],
{'click': 'select', 'hover': 'tooltip'},
{'opacity': 1.0, 'fill': 'DarkOrange',
'stroke': 'Red'},
{'opacity': 0.5}, display_legend=True,
labels=['Clicked point'])
lc4 = plots.make_bq_plot('lines', [], [], {'x': lc1_x, 'y': lc2_y}, [1, 1],
{}, {}, {}, colors=['black'], stroke_width=3,
labels=['PyCCD Model'], display_legend=False)
lc5 = plots.make_bq_plot('scatter', [], [], {'x': lc1_x, 'y': lc2_y},
[1, 1], {}, {}, {}, labels=['Model Endpoint'],
colors=['red'], marker='triangle-up')
lc6 = plots.make_bq_plot('lines', [], [], {'x': lc1_x2, 'y': lc2_y2},
[1, 1], {}, {}, {}, colors=['black'],
stroke_width=3, labels=['PyCCD Model'],
display_legend=False)
lc7 = plots.make_bq_plot('scatter', [], [], {'x': lc1_x2, 'y': lc2_y2},
[1, 1], {}, {}, {}, labels=['Model Endpoint'],
colors=['red'], marker='triangle-up')
lc8 = plots.make_bq_plot('scatter', [], [], {'x': lc1_x3, 'y': lc2_y},
[1, 1],
{'click': 'select', 'hover': 'tooltip'},
{'opacity': 1.0, 'fill': 'DarkOrange',
'stroke': 'Red'},
{'opacity': 0.5}, display_legend=True,
labels=['Sample point'])
# Axis
x_ax1 = plots.make_bq_axis('Date', lc1_x, num_ticks=6, tick_format='%Y',
orientation='horizontal')
x_ax2 = plots.make_bq_axis('Date', lc1_x2, num_ticks=6, tick_format='%Y',
orientation='horizontal')
x_ax3 = plots.make_bq_axis('DOY', lc1_x3, num_ticks=6,
orientation='horizontal')
y_ay1 = plots.make_bq_axis('SWIR1', lc2_y, orientation='vertical')
y_ay2 = plots.make_bq_axis('SWIR1', lc2_y2, orientation='vertical')
# Figures
fig = plots.make_bq_figure([lc2, lc4, lc5], [x_ax1, y_ay1],
{'height': '300px', 'width': '100%'},
'Sample TS')
fig2 = plots.make_bq_figure([lc3, lc6, lc7], [x_ax2, y_ay2],
{'height': '300px', 'width': '100%'},
'Clicked TS')
fig3 = plots.make_bq_figure([lc8], [x_ax3, y_ay1], {'height': '300px',
'width': '100%'}, 'Clicked TS')
###### Functions ######
# Reset dropdowns
def reset_drops():
measures.drop4.set_trait('options', ['Decision 5'])
measures.drop5.set_trait('options', ['Decision 6'])
measures.drop6.set_trait('options', ['Decision 7'])
measures.drop7.set_trait('options', ['Decision 8'])
measures.drop8.set_trait('options', ['Decision 9'])
measures.veg_selector.disabled = True
# Change dropdowns based on drop1 selection
def drop1_clicked(selection):
if selection.new == 'No':
measures.drop2.set_trait('options',
['>30% Vegetated?', 'Yes', 'No'])
measures.drop3.set_trait('options', ['Decision 4'])
elif selection.new == 'Yes':
measures.drop2.set_trait('options', ['Ice/Snow'])
measures.drop3.set_trait('options',
['No other information needed'])
measures.reset_drops()
# Change dropdowns based on drop2 selection
def drop2_clicked(selection):
if '>30% Vegetated?' in measures.drop2.options:
if selection.new == 'Yes':
# measures.drop3.set_trait('options',
# ['Density', 'Closed (60-70%)',
# 'Open (30-60%)', 'Sparse (<30%)'])
measures.drop3.set_trait('options',['Trees > 30%?', 'Yes','No'])
measures.veg_selector.disabled = False
# measures.drop4.set_trait('options', ['Woody vegetation', 'Yes', 'No'])
measures.drop4.set_trait('options', ['Decision 5'])
measures.drop5.set_trait('options', ['Decision 6'])
measures.drop6.set_trait('options', ['Decision 7'])
measures.drop7.set_trait('options', ['Decision 8'])
measures.drop8.set_trait('options', ['Decision 9'])
elif selection.new == 'No':
measures.drop3.set_trait('options', ['Dominant Cover', 'Water',
'Bare', 'Developed'])
measures.drop4.set_trait('options', ['Decision 5'])
measures.drop5.set_trait('options', ['Decision 6'])
measures.drop6.set_trait('options', ['Decision 7'])
measures.drop7.set_trait('options', ['Decision 8'])
measures.drop8.set_trait('options', ['Decision 9'])
measures.veg_selector.disabled = True
else:
measures.drop3.set_trait('options', ['No Other Information Needed'])
# Change dropdowns based on drop3 selection
def drop3_clicked(selection):
if 'Dominant Cover' in measures.drop3.options:
measures.veg_selector.disabled = True
if selection.new == 'Water':
water_opts = ['Water Type', 'Shore/Inter tidal', 'Shallows',
'River', 'Lake/Reservoir', 'Ocean']
measures.drop4.set_trait('options', water_opts)
measures.drop5.set_trait('options', ['Decision 6'])
measures.drop6.set_trait('options', ['Decision 7'])
measures.drop7.set_trait('options', ['Decision 8'])
measures.drop8.set_trait('options', ['Decision 9'])
elif selection.new == 'Bare':
bare_opts = ['Bare Type', 'Soil', 'Rock', 'Quarry (Active)',
'Beach/Sand']
measures.drop4.set_trait('options', bare_opts)
measures.drop5.set_trait('options', ['Decision 6'])
measures.drop6.set_trait('options', ['Decision 7'])
measures.drop7.set_trait('options', ['Decision 8'])
measures.drop8.set_trait('options', ['Decision 9'])
elif selection.new == 'Developed':
albedo_opts = ['Surface Albedo', 'High', 'Low', 'Mixed']
dev_use = ['Use', 'Residential', 'Commercial/Industrial']
bld_h_opts = ['Building Height', 'No Buildings', '1-2 Stories',
'3-5 Stories', '5+ Stories']
transp_opts = ['Transport', 'Road', 'Not Applicable']
imperv_opts = ['% Impervious', 'High (60-100)',
'Medium (30-60)', 'Low (<30)']
measures.drop4.set_trait('options', albedo_opts)
measures.drop5.set_trait('options', dev_use)
measures.drop6.set_trait('options', bld_h_opts)
measures.drop7.set_trait('options', transp_opts)
measures.drop8.set_trait('options', imperv_opts)
elif 'Trees > 30%?' in measures.drop3.options:
if selection.new == 'Yes': # Forest
measures.drop4.set_trait('options', ['Forest Phenology', 'Evergreen',
'Deciduous', 'Mixed'])
measures.drop5.set_trait('options', ['Leaf Type', 'Broad',
'Needle', 'Mixed',
'Unsure'])
measures.drop6.set_trait('options', ['Location', 'Interior',
'Edge'])
measures.drop7.set_trait('options',
['Density', 'Closed (60-70%)',
'Open (30-60%)', 'Sparse (<30%)'])
measures.drop8.set_trait('options',['Decision 9'])
else:
measures.drop4.set_trait('options', ['Shrubs >30%?', 'Yes','No'])
measures.drop5.set_trait('options', ['Decision 6'])
measures.drop6.set_trait('options', ['Decision 7'])
measures.drop7.set_trait('options', ['Decision 8'])
measures.drop8.set_trait('options', ['Decision 9'])
# Change dropdowns based on drop4 selection
def drop4_clicked(selection):
if 'Shrubs >30%?' in measures.drop4.options:
if selection.new == 'Yes': # Shrub
measures.drop5.set_trait('options', ['Shrub Phenology', 'Evergreen',
'Deciduous', 'Mixed'])
measures.drop6.set_trait('options',
['Density', 'Closed (60-70%)',
'Open (30-60%)', 'Sparse (<30%)'])
measures.drop7.set_trait('options', ['Decision 8'])
measures.drop8.set_trait('options', ['Decision 9'])
elif selection.new == 'No': # Herbaceous
measures.drop5.set_trait('options', ['Herbaceous Type',
'Grassland', 'Pasture',
'Row crops',
'Lawn/Urban Grass',
'Moss/Lichen'])
measures.drop6.set_trait('options', ['Decision 7'])
measures.drop7.set_trait('options', ['Decision 8'])
measures.drop8.set_trait('options', ['Decision 9'])
# Change dropdowns based on drop5 selection
def drop5_clicked(selection):
if 'Height >5m & Canopy >30%' in measures.drop5.options:
if selection.new == 'Yes':
measures.drop6.set_trait('options', ['Forest Type', 'Evergreen',
'Deciduous', 'Mixed'])
measures.drop7.set_trait('options', ['Leaf Type', 'Broad',
'Needle', 'Mixed',
'Unsure'])
measures.drop8.set_trait('options', ['Location', 'Interior',
'Edge'])
elif selection.new == 'No':
measures.drop6.set_trait('options', ['Shrub Type', 'Evergreen',
'Deciduous', 'Mixed'])
measures.drop7.set_trait('options', ['Decision 8'])
measures.drop8.set_trait('options', ['Decision 9'])
# Check validity of current sample
def check_val_status(selection):
selected_secondary_lc = False
wrote_correct_lc = False
if measures.second_class_drop.value != 'Secondary Class Information':
selected_secondary_lc = True
else:
print("Must specify secondary class information!")
if measures.lc.value.capitalize() == measures.textClass.value.capitalize():
wrote_correct_lc = True
if selected_secondary_lc and wrote_correct_lc:
measures.valid.value = True
measures.save_button.disabled = False
# load the feature collection, database, and google sheet
def load_everything(sender):
measures.sheet = sheets.load_sheet(measures.spreadName.value, 0,
measures.spreadsheet.value)
measures.sheet2 = sheets.load_sheet(measures.spreadName.value, 1,
measures.spreadsheet.value)
measures.sheet3 = sheets.load_sheet(measures.spreadName.value, 2,
measures.spreadsheet.value)
# Load the sample as a feature collection
sample_path = measures.sampleWidget.value
fc_df = utils.fc2dfgeo(sample_path)
measures.fc_df, first_index = utils.check_id(fc_df)
measures.valid_load.value = True
measures.valid_load.description = 'Loaded!'
measures.current_id = first_index
# If the class type is 'break', turn on necessary widgets
def turn_on_break_years(selection):
if selection.new == 'Break':
measures.break_years.disabled = False
measures.break_year.disabled = False
else:
measures.break_years.disabled = True
measures.break_year.disabled = True
# If segment is stable, disable LCC direction and change agent
def toggle_transitional_opts(selection):
if selection.new == "Transitional":
measures.direction.disabled = False
measures.change_selector.disabled = False
measures.change_other.disabled = False
measures.ca_confidence.disabled = False
elif selection.new == "Stable":
measures.direction.disabled = True
measures.change_selector.disabled = True
measures.change_other.disabled = True
measures.ca_confidence.disabled = True
# Change yaxis for the sample time series
def change_yaxis(value):
measures.lc2_y.min = measures.ylim.value[0]
measures.lc2_y.max = measures.ylim.value[1]
# Change xaxis for the sample time series
def change_xaxis(value):
measures.lc1_x.min = datetime.date(measures.xlim.value[0], 2, 1)
measures.lc1_x.max = datetime.date(measures.xlim.value[1], 2, 1)
# Change y axis for the clicked point
def change_yaxis2(value):
measures.lc2_y2.min = measures.ylim2.value[0]
measures.lc2_y2.max = measures.ylim2.value[1]
# Change x axis for the clicked point
def change_xaxis2(value):
measures.lc1_x2.min = datetime.date(measures.xlim2.value[0], 2, 1)
measures.lc1_x2.max = datetime.date(measures.xlim2.value[1], 2, 1)
# Display date of observation when hovering on scatterplot
def hover_event(self, target):
measures.hover_label.value = str(target['data']['x'])
# Advance to next sample
def advance(b):
measures.lc4.x = []
measures.lc4.y = []
measures.lc5.x = []
measures.lc5.y = []
measures.lc5.display_legend = False
measures.pyccd_flag = False
measures.current_id += 1
measures.pt_message.value = "Point ID: {}".format(measures.current_id)
measures.map_point()
measures.get_ts()
measures.plot_ts(measures.lc2, 'ts')
measures.plot_ts(measures.lc8, 'doy')
measures.reset_everything()
measures.click_train.set_trait('value', False)
measures.valid.value = False
measures.description = 'Not Saved'
# Go to previous sample
def decrease(b):
measures.lc4.x = []
measures.lc4.y = []
measures.lc5.x = []
measures.lc5.y = []
measures.lc5.display_legend = False
measures.pyccd_flag = False
measures.current_id -= 1
measures.pt_message.value = "Point ID: {}".format(measures.current_id)
measures.map_point()
measures.get_ts()
#measures.plot_ts()
measures.plot_ts(measures.lc2, 'ts')
measures.plot_ts(measures.lc8, 'doy')
measures.reset_everything()
measures.click_train.set_trait('value', False)
measures.valid.value = False
measures.description = 'Not Saved'
# Go to a specific sample
def go_to_sample(b):
measures.lc4.x = []
measures.lc4.y = []
measures.lc5.x = []
measures.lc5.y = []
measures.lc5.display_legend = False
measures.pyccd_flag = False
measures.current_id = int(b.value)
measures.pt_message.value = "Point ID: {}".format(measures.current_id)
measures.valid.value = False
measures.description = 'Not Saved'
measures.map_point()
measures.get_ts()
#measures.plot_ts()
measures.plot_ts(measures.lc2, 'ts')
measures.plot_ts(measures.lc8, 'doy')
measures.reset_everything()
measures.click_train.set_trait('value', False)
measures.valid.value = False
measures.description = 'Not Saved'
# Return to sample location
def return_to_sample(b):
measures.map_point()
measures.get_ts()
measures.plot_ts(measures.lc2, 'ts')
measures.plot_ts(measures.lc8, 'doy')
# Functions for changing image stretch
def change_image_band1(change):
new_band = change['new']
measures.b1 = new_band
def change_image_band2(change):
new_band = change['new']
measures.b2 = new_band
def change_image_band3(change):
new_band = change['new']
measures.b3 = new_band
# Band selection for sample point
def on_band_selection1(change):
band_index = change['owner'].index
measures.band_index1 = band_index
new_band = change['new']
measures.y_ay1.label = new_band
#measures.plot_ts()
measures.plot_ts(measures.lc2, 'ts')
measures.plot_ts(measures.lc8, 'doy')
# Band selection for clicked point
def on_band_selection2(change):
new_band = change['new']
band_index = change['owner'].index
measures.band_index2 = band_index
measures.lc3.x = measures.click_df['datetime'].values
measures.lc3.y = measures.click_df[new_band]
#measures.plot_ts(measures.lc3, 'ts')
measures.y_ay2.label = new_band
if measures.pyccd_flag2:
measures.do_pyccd2(0)
# Clear everything on map besides current sample
def clear_map(b):
lft.clear_map(measures.m, streets=True)
if hasattr(measures, 'fc_df'):
measures.map_point()
# Add an image to the map when clicked on time series
def add_image(self, target):
m = measures.m
df = measures.sample_df
current_band = measures.band_list[measures.band_index1]
sample_col = measures.sample_col
stretch_min = measures.stretch_min.value
stretch_max = measures.stretch_max.value
b1 = measures.b1
b2 = measures.b2
b3 = measures.b3
lft.click_event(target, m, current_band, df, sample_col, stretch_min,
stretch_max, b1, b2, b3)
def add_image2(self, target):
m = measures.m
df = measures.click_df
current_band = measures.band_list[measures.band_index2]
sample_col = measures.click_col
stretch_min = measures.stretch_min.value
stretch_max = measures.stretch_max.value
b1 = measures.b1
b2 = measures.b2
b3 = measures.b3
lft.click_event(target, m, current_band, df, sample_col, stretch_min,
stretch_max, b1, b2, b3)
# Plot ts for point
def do_draw(self, action, geo_json):
current_band = measures.band_list[measures.band_index2]
doy_range = measures.doy_range
_col, _df = utils.handle_draw(action, geo_json, current_band,
list(measures.xlim2.value), doy_range)
measures.click_geojson = geo_json
coord1 = measures.click_geojson['geometry']['coordinates'][0]
coord2 = measures.click_geojson['geometry']['coordinates'][1]
measures.click_coord_message.value = "Click Lat, Lon: {}, {}".format(coord2, coord1)
measures.click_df = _df
measures.click_col = _col
# Disable ts collection checkbox but calculate box in the background
measures.click_train.value = False
measures.click_trainbox = utils.calculate_clicked_bbox(geo_json)
measures.lc6.x = []
measures.lc6.y = []
measures.lc7.x = []
measures.lc7.y = []
measures.lc3.x = measures.click_df['datetime'].values
measures.lc3.y = measures.click_df[current_band]
measures.valid.value = False
measures.description = 'Not Saved'
if measures.color_check.value is False:
measures.lc3.colors = list(measures.point_color)
else:
measures.lc3.colors = list(measures.click_df['color'].values)
# Add point location to map
def map_point():
zoom = int(measures.zoom_box.value)
kml = measures.kml_link
name = 'Sample point'
measures.samplept_geojson = measures.fc_df['geometry'][measures.current_id]
coord1 = measures.samplept_geojson['coordinates'][0]
coord2 = measures.samplept_geojson['coordinates'][1]
measures.sample_coord_message.value = "Sample Lat, Lon: {}, {}".format(coord2, coord1)
lft.add_map_point(measures.samplept_geojson, zoom, measures.m, kml, name)
def go_to_lat_lon(b):
zoom = int(measures.zoom_box.value)
_longitude = float(measures.go_to_lon.value)
_latitude = float(measures.go_to_lat.value)
kml = measures.kml_link
name = 'Lat/Lon point'
ll_geo=shapely.geometry.Point(_longitude,_latitude)
ll_geojson = shapely.geometry.mapping(ll_geo)
measures.samplept_geojson = ll_geojson
coord1 = measures.samplept_geojson['coordinates'][0]
coord2 = measures.samplept_geojson['coordinates'][1]
measures.sample_coord_message.value = "Lat, Lon: {}, {}".format(coord2, coord1)
lft.add_map_point(measures.samplept_geojson, zoom, measures.m, kml, name)
# Get time series data for location.
def get_ts():
measures.error_label.value = 'Loading'
coords = measures.fc_df['geometry'][measures.current_id]['coordinates']
doy_range = measures.doy_range
measures.current_band = measures.band_list[measures.band_index1]
measures.sample_col = utils.get_full_collection(coords, list(measures.xlim.value),
doy_range)
measures.sample_df = utils.get_df_full(measures.sample_col,
coords).dropna()
measures.error_label.value = 'Point Loaded!'
# Add time series data to plots
def plot_ts(plot, plottype):
df = measures.sample_df
if measures.color_check.value is True:
color_marks = list(measures.sample_df['color'].values)
else:
color_marks = None
band = measures.band_list[measures.band_index1]
if plottype == 'ts':
plots.add_plot_ts(df, plot, band=band, color_marks=color_marks)
else:
plots.add_plot_doy(df, plot, band=band, color_marks=color_marks)
if measures.pyccd_flag:
measures.do_pyccd(0)
# Run pyccd for the sample location
def do_pyccd(b):
measures.pyccd_flag = True
display_legend = measures.lc5.display_legend
dfPyCCD = measures.sample_df
band_index = measures.band_index1
results = ccd_tools.run_pyccd(display_legend, dfPyCCD, band_index)
if band_index > 5:
measures.lc4.y = []
measures.lc4.x = []
measures.lc4.y = []
measures.lc5.x = []
measures.lc5.display_legend = False
return
else:
ccd_tools.plot_pyccd(dfPyCCD, results, band_index, (0, 4000),
measures.lc4, measures.lc5)
measures.lc5.display_legend = True
# Run pyccd for the clicked location
def do_pyccd2(b):
measures.pyccd_flag2 = True
display_legend = measures.lc7.display_legend
dfPyCCD = measures.click_df
band_index = measures.band_index2
results = ccd_tools.run_pyccd(display_legend, dfPyCCD, band_index)
if band_index > 5:
measures.lc6.y = []
measures.lc6.x = []
measures.lc7.y = []
measures.lc7.x = []
measures.lc7.display_legend = False
return
else:
ccd_tools.plot_pyccd(dfPyCCD, results, band_index, (0, 4000),
measures.lc6, measures.lc7)
measures.lc7.display_legend = True
# Clear pyccd results
def clear_pyccd(b):
measures.lc4.x = []
measures.lc5.y = []
def clear_pyccd2(b):
measures.lc6.x = []
measures.lc7.y = []
# Save sample
def save_sample():
# Connect to the database
#conn = sqlite3.connect(measures.dbPath)
#c = conn.cursor()
# Get everything in right format
year1 = measures.years.value[0]
year2 = measures.years.value[1]
waterType = 'N/A'
bareType = 'N/A'
albedo = 'N/A'
use = 'N/A'
height = 'N/A'
transport = 'N/A'
impervious = 'N/A'
density = 'N/A'
vegType1 = 'N/A'
seg_notes = 'N/A'
herbaceousType = 'N/A'
shrubType = 'N/A'
forestPhenology = 'N/A'
leafType = 'N/A'
b_notes_value = 'N/A'
b_changeAgent = 'N/A'
b_ca_other = 'N/A'
b_confCA = 'N/A'
b_direction = 'N/A'
location = 'N/A'
coverType = measures.drop0.value
# Segment type
seg_type = measures.drop9.value
direction = measures.direction.value
direction = [str(i) for i in direction]
direction = ', '.join(direction)
changeAgent = measures.change_selector.value
changeAgent = [str(i) for i in changeAgent]
changeAgent = ', '.join(changeAgent)
confCA = measures.ca_confidence.value
ca_other = measures.change_other.value
#
if ca_other == 'Specify other':
ca_other = 'N/A'
seg_notes = measures.notes_seg_trend.value
# Break
condition = measures.break_check.value
if condition:
condition = 'Break'
else:
condition = 'No Break'
b_changeAgent = measures.b_change_selector.value
b_changeAgent = [str(i) for i in b_changeAgent]
b_changeAgent = ', '.join(b_changeAgent)
break_year = measures.break_year.value
break_range1 = measures.break_years.value[0]
break_range2 = measures.break_years.value[1]
b_confCA = measures.b_ca_confidence.value
b_ca_other = measures.b_change_other.value
if b_ca_other == 'Specify other':
b_ca_other = 'N/A'
b_direction = measures.b_direction.value
b_direction = [str(i) for i in b_direction]
b_direction = ', '.join(b_direction)
class1 = 'Unfilled'
# Ice/Snow
if measures.drop1.value == 'Yes':
class1 = 'Snow/Ice'
else:
if measures.drop2.value == 'No': #Non-Veg
class1 = measures.drop3.value
if class1 == 'Water':
waterType = measures.drop4.value
elif class1 == 'Bare':
bareType = measures.drop4.value
else:
albedo = measures.drop4.value
use = measures.drop5.value
height = measures.drop6.value
transport = measures.drop7.value
impervious = measures.drop8.value
elif measures.drop2.value == 'Yes': #Veg
vegType1 = measures.veg_selector.value
vegType1 = [str(i) for i in vegType1]
vegType1 = ', '.join(vegType1)
if measures.drop3.value == 'Yes':
class1 = 'Forest'
forestPhenology = measures.drop4.value
leafType = measures.drop5.value
location = measures.drop6.value
density = measures.drop7.value
elif measures.drop4.value == 'Yes':
class1 = 'Shrub'
shrubType = measures.drop5.value
density = measures.drop6.value
elif measures.drop4.value == 'No':
class1 = 'Herbaceous'
herbaceousType = measures.drop5.value
conf = measures.confidence.value
notes_value = measures.notes.value
b_notes_value = measures.notes_break.value
# Get coordinates depending on source
if measures.click_train.value:
idSample = 0
lat = measures.click_geojson['geometry']['coordinates'][1]
lon = measures.click_geojson['geometry']['coordinates'][0]
else:
idSample = measures.current_id
lat = measures.samplept_geojson['coordinates'][1]
lon = measures.samplept_geojson['coordinates'][0]
sampleInput = (idSample, lat, lon, year1, year2, coverType,
class1, waterType, bareType, albedo, use, height,
transport, impervious, density,
herbaceousType, shrubType, forestPhenology, leafType,
location, vegType1, conf, notes_value, seg_type,
direction, changeAgent, confCA, ca_other, seg_notes,
break_year, break_range1, break_range2)
# Put sample information into database
#c.execute("""insert into measures
# values {i}""".format(i=sampleInput))
#
# Save (commit) the changes
#conn.commit()
# Close the cursor
#c.close()
# Save to drive
sampleInputList = [str(idSample), str(lat), str(lon), str(year1),
str(year2), coverType,
class1, waterType, bareType, albedo,
use, height, transport, impervious, density,
herbaceousType, shrubType, forestPhenology,
leafType, location, vegType1, str(conf),
notes_value, seg_type, direction, changeAgent,
str(confCA), ca_other, seg_notes]
sampleInputListFull = sampleInputList
# Save break information to second sheet
if condition == 'Break':
breakList = [str(idSample), str(lat), str(lon), b_changeAgent,
b_ca_other, b_confCA, break_year, break_range1,
break_range2, b_direction, b_notes_value]
count = len(measures.sheet2.col_values(1))
measures.sheet2.insert_row(breakList, 2)
time.sleep(3)
count_new = len(measures.sheet2.col_values(1))
elif measures.click_train.value:
count = len(measures.sheet3.col_values(1))
measures.sheet3.insert_row(sampleInputListFull, 2)
count_new = len(measures.sheet3.col_values(1))
else:
count = len(measures.sheet.col_values(1))
measures.sheet.insert_row(sampleInputListFull, 2)
time.sleep(3)
count_new = len(measures.sheet.col_values(1))
if count_new > count:
# Change save validity state
if condition == 'Break':
measures.b_valid.value = True
measures.b_valid.description = 'Saved!'
else:
measures.valid.value = True
measures.valid.description = 'Saved!'
measures.reset_everything()
measures.click_train.set_trait('value', False)
else:
time.sleep(10)
if condition == 'Break':
count_new = len(measures.sheet2.col_values(1))
else:
count_new = len(measures.sheet.col_values(1))
if count_new > count:
# Change save validity state
measures.valid.value = True
measures.valid.description = 'Saved!'
measures.reset_everything()
measures.click_train.set_trait('value', False)
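    # Editor's note (not in the original source): save_sample() verifies the write by
    # counting the values in the sheet's first column before and after insert_row();
    # if the count has not grown it sleeps and re-checks once more, and if the row
    # still has not appeared the point simply stays marked 'Not Saved'.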
# Reset all widgets
def reset_everything():
# Land cover
measures.drop0.set_trait('value', 'Dominant or Secondary?')
measures.drop1.set_trait('value', 'Persistant Ice?')
measures.drop2.set_trait('options', ['Decision 3'])
measures.drop3.set_trait('options', ['Decision 4'])
measures.drop4.set_trait('options', ['Decision 5'])
measures.drop5.set_trait('options', ['Decision 6'])
measures.drop6.set_trait('options', ['Decision 7'])
measures.drop7.set_trait('options', ['Decision 8'])
measures.drop8.set_trait('options', ['Decision 9'])
measures.veg_selector.set_trait('value', ('Select a modifier',))
measures.veg_selector.disabled = True
measures.years.set_trait('value', [1990, 1991])
measures.confidence.set_trait('value', 0)
# Segment attrs
measures.drop9.set_trait('value', 'Select type')
measures.direction.set_trait('value', ('NA',))
measures.change_selector.set_trait('value', ('None',))
measures.notes.value = 'Enter any useful or interesting information \
about the land cover of this sample'
measures.ca_confidence.set_trait('value', 0)
measures.notes_seg_trend.value = 'Enter any useful or interesting \
information about the Time Trend of \
this sample'
# Break
measures.break_check.value = False
measures.break_year.set_trait('value', 1991)
measures.break_years.set_trait('value', [1990, 1991])
measures.b_direction.set_trait('value', ('NA',))
measures.b_change_selector.set_trait('value', ('None',))
measures.b_change_other.set_trait('value', 'Specify other')
measures.b_ca_confidence.set_trait('value', 0)
measures.notes_break.value = 'Enter any useful or interesting \
information about the Break in the \
time series'
# Interaction function for saving sample
def do_save_sample(b):
measures.save_sample()
#measures.change_table(0)
measures.reset_everything()
# Activate break widgets
def do_activate_break(b):
if b.new:
measures.break_year.disabled = False
measures.break_years.disabled = False
measures.b_direction.disabled = False
measures.b_change_selector.disabled = False
measures.b_change_other.disabled = False
measures.b_ca_confidence.disabled = False
measures.notes_break.disabled = False
measures.b_save_button.disabled = False
else:
measures.b_save_button.disabled = True
measures.break_year.disabled = True
measures.break_years.disabled = True
measures.b_direction.disabled = True
measures.b_change_selector.disabled = True
measures.b_change_other.disabled = True
measures.b_ca_confidence.disabled = True
measures.notes_break.disabled = True
# Enable collection of TS box as training data
def enable_ts_collection(b):
if b.new:
# TODO: Create geoJSON and test with that class?
if isinstance(measures.click_geojson, dict):
measures.box_geojson = ipyleaflet.GeoJSON(data=measures.click_trainbox.getInfo(),
style={'color': 'black'},
name='TS train box')
measures.m.add_layer(measures.box_geojson)
measures.reset_everything()
else:
measures.click_train.set_trait('value', False)
else:
if isinstance(measures.click_geojson, dict):
# TODO: zoom to the clicked point
measures.m.remove_layer(measures.box_geojson)
####### Widget Interactions #######
click_train.observe(enable_ts_collection, 'value')
break_check.observe(do_activate_break, 'value')
return_button.on_click(return_to_sample)
save_button.on_click(do_save_sample)
b_save_button.on_click(do_save_sample)
validate.on_click(check_val_status)
drop1.observe(drop1_clicked, 'value')
drop2.observe(drop2_clicked, 'value')
drop3.observe(drop3_clicked, 'value')
drop4.observe(drop4_clicked, 'value')
drop5.observe(drop5_clicked, 'value')
drop9.observe(toggle_transitional_opts, 'value')
load_button.on_click(load_everything)
dc = ipyleaflet.DrawControl(marker={'shapeOptions': {'color': '#ff0000'}},
polygon={}, circle={}, circlemarker={},
polyline={})
zoom = 5
layout = widgets.Layout(width='50%')
center = (3.3890701010382958, -67.32297252983098)
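    # initial map view: zoom level 5, centered on a (lat, lon) point in northern South America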
m = lft.make_map(zoom, layout, center)
lft.add_basemap(m, ipyleaflet.basemaps.Esri.WorldImagery)
# Display controls
ylim.observe(change_yaxis)
xlim.observe(change_xaxis)
ylim2.observe(change_yaxis2)
xlim2.observe(change_xaxis2)
clear_layers.on_click(clear_map)
band_selector1.observe(on_band_selection1, names='value')
band_selector2.observe(on_band_selection2, names='value')
image_band_1.observe(change_image_band1, names='value')
image_band_2.observe(change_image_band2, names='value')
image_band_3.observe(change_image_band3, names='value')
# Samples
next_pt.on_click(advance)
previous_pt.on_click(decrease)
# pyccd
pyccd_button.on_click(do_pyccd)
pyccd_button2.on_click(do_pyccd2)
toggle_pyccd_button.on_click(clear_pyccd)
toggle_pyccd_button2.on_click(clear_pyccd2)
# Plots
lc2.on_element_click(add_image)
lc2.tooltip = hover_label
lc2.on_hover(hover_event)
lc3.on_element_click(add_image2)
lc3.tooltip = hover_label
lc3.on_hover(hover_event)
idBox.on_submit(go_to_sample)
go_to_lat.on_submit(go_to_lat_lon)
go_to_lon.on_submit(go_to_lat_lon)
# Mapping
measure = ipyleaflet.MeasureControl(position='topleft',
active_color = 'orange',
primary_length_unit = 'kilometers')
measure.completed_color = 'red'
dc.on_draw(do_draw)
m.add_control(dc)
m.add_control(measure)
m.add_control(ipyleaflet.LayersControl())
| 43.952709 | 103 | 0.555158 |
7944d37fa24963a658faaa26e144329dc24355dd | 167 | py | Python | v1.0.0.test/libotp/nametag/_constants.py | TTOFFLINE-LEAK/ttoffline | bb0e91704a755d34983e94288d50288e46b68380 | [
"MIT"
] | 4 | 2019-07-01T15:46:43.000Z | 2021-07-23T16:26:48.000Z | v1.0.0.test/libotp/nametag/_constants.py | TTOFFLINE-LEAK/ttoffline | bb0e91704a755d34983e94288d50288e46b68380 | [
"MIT"
] | 1 | 2019-06-29T03:40:05.000Z | 2021-06-13T01:15:16.000Z | v1.0.0.test/libotp/nametag/_constants.py | TTOFFLINE-LEAK/ttoffline | bb0e91704a755d34983e94288d50288e46b68380 | [
"MIT"
] | 4 | 2019-07-28T21:18:46.000Z | 2021-02-25T06:37:25.000Z | CFSpeech = 1
CFThought = 2
CFQuicktalker = 4
CFTimeout = 8
CFPageButton = 16
CFQuitButton = 32
CFReversed = 64
CFSndOpenchat = 128
CFNoQuitButton = 256
CFExclaim = 512 | 16.7 | 20 | 0.766467 |
7944d3a7d2fb01aa0b87f1127e68f5f9ee3d10eb | 26,979 | py | Python | pysnmp-with-texts/CISCO-DIAMETER-CC-APPL-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/CISCO-DIAMETER-CC-APPL-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/CISCO-DIAMETER-CC-APPL-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CISCO-DIAMETER-CC-APPL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-DIAMETER-CC-APPL-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:54:22 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
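# NOTE: `mibBuilder` is not defined in this file; pysnmp's MibBuilder injects it into the module
# namespace when it loads this compiled MIB, so the module is not meant to be imported directly.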
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint")
ciscoExperiment, = mibBuilder.importSymbols("CISCO-SMI", "ciscoExperiment")
InetAddressType, InetAddress = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType", "InetAddress")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
Counter64, ObjectIdentity, MibIdentifier, Bits, ModuleIdentity, Integer32, NotificationType, TimeTicks, Gauge32, iso, Unsigned32, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "ObjectIdentity", "MibIdentifier", "Bits", "ModuleIdentity", "Integer32", "NotificationType", "TimeTicks", "Gauge32", "iso", "Unsigned32", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32")
StorageType, RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "StorageType", "RowStatus", "DisplayString", "TextualConvention")
ciscoDiameterCCAMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 10, 575))
ciscoDiameterCCAMIB.setRevisions(('2006-08-23 00:01',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoDiameterCCAMIB.setRevisionsDescriptions(('Initial version of this module.',))
if mibBuilder.loadTexts: ciscoDiameterCCAMIB.setLastUpdated('200608230001Z')
if mibBuilder.loadTexts: ciscoDiameterCCAMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoDiameterCCAMIB.setContactInfo('Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: [email protected]')
if mibBuilder.loadTexts: ciscoDiameterCCAMIB.setDescription("The MIB module for entities implementing the Diameter Credit Control Application, RFC 4006. Initial Cisco'ized version of the IETF draft draft-zorn-dime-diameter-cc-app-mib-00.txt.")
ciscoDiameterCCAMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 575, 0))
ciscoDiameterCCAMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 575, 1))
ciscoDiameterCCAMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 575, 2))
cdccaHostCfgs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 1))
cdccaPeerCfgs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 2))
cdccaPeerStats = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3))
cdccaHostId = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 1, 1), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaHostId.setStatus('current')
if mibBuilder.loadTexts: cdccaHostId.setDescription("The implementation identification string for the Diameter software in use on the system, for example; 'diameterd'")
cdccaHostIpAddrTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 1, 2), )
if mibBuilder.loadTexts: cdccaHostIpAddrTable.setStatus('current')
if mibBuilder.loadTexts: cdccaHostIpAddrTable.setDescription("The table listing the Diameter Credit Control host's IP Addresses.")
cdccaHostIpAddrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 1, 2, 1), ).setIndexNames((0, "CISCO-DIAMETER-CC-APPL-MIB", "cdccaHostIpAddrIndex"))
if mibBuilder.loadTexts: cdccaHostIpAddrEntry.setStatus('current')
if mibBuilder.loadTexts: cdccaHostIpAddrEntry.setDescription('A row entry representing a Diameter Credit Control host IP Address.')
cdccaHostIpAddrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 1, 2, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: cdccaHostIpAddrIndex.setStatus('current')
if mibBuilder.loadTexts: cdccaHostIpAddrIndex.setDescription('A number uniquely identifying the number of IP Addresses supported by this Diameter Credit Control host.')
cdccaHostIpAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 1, 2, 1, 2), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaHostIpAddrType.setStatus('current')
if mibBuilder.loadTexts: cdccaHostIpAddrType.setDescription('The type of internet address stored in cdccaHostIpAddress.')
cdccaHostIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 1, 2, 1, 3), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaHostIpAddress.setStatus('current')
if mibBuilder.loadTexts: cdccaHostIpAddress.setDescription('The IP-Address of the host, which is of the type specified in cdccaHostIpAddrType.')
cdccaPeerTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 2, 1), )
if mibBuilder.loadTexts: cdccaPeerTable.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerTable.setDescription('The table listing information regarding the discovered or configured Diameter Credit Control peers.')
cdccaPeerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 2, 1, 1), ).setIndexNames((0, "CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerIndex"))
if mibBuilder.loadTexts: cdccaPeerEntry.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerEntry.setDescription('A row entry representing a discovered or configured Diameter Credit Control peer.')
cdccaPeerIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 2, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: cdccaPeerIndex.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerIndex.setDescription('An index that uniquely identifies a dcca peer. This index is assigned arbitrarily by the SNMP engine and is not saved over reloads.')
cdccaPeerId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 2, 1, 1, 2), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdccaPeerId.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerId.setDescription('The server identifier for the Diameter Credit Control peer. It has to be unique and not an empty string.')
cdccaPeerFirmwareRevision = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 2, 1, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerFirmwareRevision.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerFirmwareRevision.setDescription('Firmware revision of peer. If no firmware revision, the revision of the Diameter Credit Control software module may be reported instead.')
cdccaPeerStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 2, 1, 1, 4), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdccaPeerStorageType.setReference('Textual Conventions for SMIv2, Section 2.')
if mibBuilder.loadTexts: cdccaPeerStorageType.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStorageType.setDescription('The storage type for this conceptual row. None of the columnar objects is writable when the conceptual row is permanent.')
cdccaPeerRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 2, 1, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdccaPeerRowStatus.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerRowStatus.setDescription("The status of this conceptual row. To create a row in this table, a manager must set this object to either createAndGo(4) or createAndWait(5). Until instances of all corresponding columns are appropriately configured, the value of the corresponding instance of the cdccaPeerRowStatus column is 'notReady'. In particular, a newly created row cannot be made active until the corresponding cdccaPeerId has been set. cdccaPeerId may not be modified while the value of this object is active(1): An attempt to set these objects while the value of cdccaPeerRowStatus is active(1) will result in an inconsistentValue error. Entries in this table with cdccaPeerRowStatus equal to active(1) remain in the table until destroyed. Entries in this table with cdccaPeerRowStatus equal to values other than active(1) will be destroyed after timeout (5 minutes). If a cdccaPeerId being created via SNMP already exists in another active cdccaPeerEntry, then a newly created row cannot be made active until the original row with the with the cdccaPeerId value is destroyed. Upon reload, cdccaPeerIndex values may be changed.")
cdccaPeerVendorTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 2, 2), )
if mibBuilder.loadTexts: cdccaPeerVendorTable.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerVendorTable.setDescription('The table listing the Vendor IDs supported by the peer.')
cdccaPeerVendorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 2, 2, 1), ).setIndexNames((0, "CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerIndex"), (0, "CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerVendorIndex"))
if mibBuilder.loadTexts: cdccaPeerVendorEntry.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerVendorEntry.setDescription('A row entry representing a Vendor ID supported by the peer.')
cdccaPeerVendorIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 2, 2, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: cdccaPeerVendorIndex.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerVendorIndex.setDescription('A number uniquely identifying the Vendor ID supported by the peer.')
cdccaPeerVendorId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 2, 2, 1, 2), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdccaPeerVendorId.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerVendorId.setDescription('The active Vendor IDs used for peer connections.')
cdccaPeerVendorStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 2, 2, 1, 3), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdccaPeerVendorStorageType.setReference('Textual Conventions for SMIv2, Section 2.')
if mibBuilder.loadTexts: cdccaPeerVendorStorageType.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerVendorStorageType.setDescription('The storage type for this conceptual row. An agent implementing the table must allow adding cdccaPeerVendorId into the table. None of the columnar objects is writable when the conceptual row is permanent.')
cdccaPeerVendorRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 2, 2, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdccaPeerVendorRowStatus.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerVendorRowStatus.setDescription("The status of this conceptual row. To create a row in this table, a manager must set this object to either createAndGo(4) or createAndWait(5). Until instances of all corresponding columns are appropriately configured, the value of the corresponding instance of the cdccaPeerVendorRowStatus column is 'notReady'. In particular, a newly created row cannot be made active until the corresponding cdccaPeerVendorId has been set. cdccaPeerVendorId may not be modified while the value of this object is active(1): An attempt to set these objects while the value of cdccaPeerVendorRowStatus is active(1) will result in an inconsistentValue error. Entries in this table with cdccaPeerVendorRowStatus equal to active(1) remain in the table until destroyed. Entries in this table with cdccaPeerVendorRowStatus equal to values other than active(1) will be destroyed after timeout (5 minutes). If the peer vendor id being created via SNMP already exists in another active cdccaPeerVendorEntry, then a newly created row cannot be made active until the original row with the peer vendor id value is destroyed. Upon reload, cdccaPeerVendorIndex values may be changed.")
cdccaPeerStatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1), )
if mibBuilder.loadTexts: cdccaPeerStatsTable.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsTable.setDescription('The table listing the Diameter Credit Control Peer Statistics.')
cdccaPeerStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1), ).setIndexNames((0, "CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerIndex"))
if mibBuilder.loadTexts: cdccaPeerStatsEntry.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsEntry.setDescription('A row entry representing a Diameter Credit Control Peer.')
cdccaPeerStatsCCRIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 1), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsCCRIn.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsCCRIn.setDescription('Number of Diameter Credit-Control-Request (CCR) messages received, per peer.')
cdccaPeerStatsCCROut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 2), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsCCROut.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsCCROut.setDescription('Number of Diameter Credit-Control-Request (CCR) messages sent, per peer.')
cdccaPeerStatsCCRDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 3), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsCCRDropped.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsCCRDropped.setDescription('Number of Diameter Credit-Control-Request (CCR) messages dropped, per peer.')
cdccaPeerStatsCCAIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 4), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsCCAIn.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsCCAIn.setDescription('Number of Diameter Credit-Control-Answer (CCA) messages received, per peer.')
cdccaPeerStatsCCAOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 5), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsCCAOut.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsCCAOut.setDescription('Number of Diameter Credit-Control-Answer (CCA) messages sent, per peer.')
cdccaPeerStatsCCADropped = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 6), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsCCADropped.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsCCADropped.setDescription('Number of Diameter Credit-Control-Answer (CCA) messages dropped, per peer.')
cdccaPeerStatsRARIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 7), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsRARIn.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsRARIn.setDescription('Number of Diameter Re-Auth-Request (RAR) messages received, per peer.')
cdccaPeerStatsRARDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 8), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsRARDropped.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsRARDropped.setDescription('Number of Diameter Re-Auth-Request (RAR) messages dropped, per peer.')
cdccaPeerStatsRAAOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 9), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsRAAOut.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsRAAOut.setDescription('Number of Diameter Re-Auth-Answer (RAA) messages transmitted, per peer.')
cdccaPeerStatsRAADropped = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 10), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsRAADropped.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsRAADropped.setDescription('Number of Diameter Re-Auth-Answer (RAA) messages dropped, per peer.')
cdccaPeerStatsSTROut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 11), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsSTROut.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsSTROut.setDescription('Number of Diameter Session-Termination-Request (STR) messages transmitted, per peer.')
cdccaPeerStatsSTRDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 12), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsSTRDropped.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsSTRDropped.setDescription('Number of Diameter Session-Termination-Request (STR) messages dropped, per peer.')
cdccaPeerStatsSTAIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 13), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsSTAIn.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsSTAIn.setDescription('Number of Diameter Session-Termination-Answer (STA) messages received, per peer.')
cdccaPeerStatsSTADropped = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 14), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsSTADropped.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsSTADropped.setDescription('Number of Diameter Session-Termination-Answer (STA) messages dropped, per peer.')
cdccaPeerStatsAAROut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 15), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsAAROut.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsAAROut.setDescription('Number of Diameter AA-Request (AAR) messages transmitted, per peer.')
cdccaPeerStatsAARDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 16), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsAARDropped.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsAARDropped.setDescription('Number of Diameter AA-Request (AAR) messages dropped, per peer.')
cdccaPeerStatsAAAIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 17), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsAAAIn.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsAAAIn.setDescription('Number of Diameter AA-Answer (AAA) messages received, per peer.')
cdccaPeerStatsAAADropped = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 18), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsAAADropped.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsAAADropped.setDescription('Number of Diameter AA-Answer (AAA) messages dropped, per peer.')
cdccaPeerStatsASRIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 19), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsASRIn.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsASRIn.setDescription('Number of Diameter Abort-Session-Request (ASR) messages received, per peer.')
cdccaPeerStatsASRDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 20), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsASRDropped.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsASRDropped.setDescription('Number of Diameter Abort-Session-Request (ASR) messages dropped, per peer.')
cdccaPeerStatsASAOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 21), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsASAOut.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsASAOut.setDescription('Number of Diameter Abort-Session-Answer (ASA) messages transmitted, per peer.')
cdccaPeerStatsASADropped = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 575, 1, 3, 1, 1, 22), Counter32()).setUnits('messages').setMaxAccess("readonly")
if mibBuilder.loadTexts: cdccaPeerStatsASADropped.setStatus('current')
if mibBuilder.loadTexts: cdccaPeerStatsASADropped.setDescription('Number of Diameter Abort-Session-Answer (ASA) messages dropped, per peer.')
ciscoDiameterCCAMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 575, 2, 1))
ciscoDiameterCCAMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 575, 2, 2))
ciscoDiameterCCAMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 575, 2, 1, 1)).setObjects(("CISCO-DIAMETER-CC-APPL-MIB", "ciscoDiameterCCAPeerStatsGroup"), ("CISCO-DIAMETER-CC-APPL-MIB", "ciscoDiameterCCAHostCfgGroup"), ("CISCO-DIAMETER-CC-APPL-MIB", "ciscoDiameterCCAPeerCfgGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoDiameterCCAMIBCompliance = ciscoDiameterCCAMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: ciscoDiameterCCAMIBCompliance.setDescription('The compliance statement for Diameter Credit Control application entities.')
ciscoDiameterCCAHostCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 575, 2, 2, 1)).setObjects(("CISCO-DIAMETER-CC-APPL-MIB", "cdccaHostIpAddrType"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaHostIpAddress"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaHostId"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoDiameterCCAHostCfgGroup = ciscoDiameterCCAHostCfgGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoDiameterCCAHostCfgGroup.setDescription('A collection of objects providing host configuration common to the server.')
ciscoDiameterCCAPeerCfgGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 575, 2, 2, 2)).setObjects(("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerId"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerVendorId"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStorageType"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerVendorStorageType"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerFirmwareRevision"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerRowStatus"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerVendorRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoDiameterCCAPeerCfgGroup = ciscoDiameterCCAPeerCfgGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoDiameterCCAPeerCfgGroup.setDescription('A collection of objects providing peer configuration common to the server.')
ciscoDiameterCCAPeerStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 575, 2, 2, 3)).setObjects(("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsCCRIn"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsCCROut"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsCCRDropped"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsCCAIn"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsCCAOut"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsCCADropped"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsRARIn"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsRARDropped"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsRAAOut"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsRAADropped"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsSTROut"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsSTRDropped"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsSTAIn"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsSTADropped"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsAAROut"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsAARDropped"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsAAAIn"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsAAADropped"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsASRIn"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsASRDropped"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsASAOut"), ("CISCO-DIAMETER-CC-APPL-MIB", "cdccaPeerStatsASADropped"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoDiameterCCAPeerStatsGroup = ciscoDiameterCCAPeerStatsGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoDiameterCCAPeerStatsGroup.setDescription('A collection of objects providing peer statistics common to the server.')
mibBuilder.exportSymbols("CISCO-DIAMETER-CC-APPL-MIB", ciscoDiameterCCAMIBCompliances=ciscoDiameterCCAMIBCompliances, cdccaPeerStatsCCAIn=cdccaPeerStatsCCAIn, cdccaPeerEntry=cdccaPeerEntry, cdccaPeerStatsAAADropped=cdccaPeerStatsAAADropped, PYSNMP_MODULE_ID=ciscoDiameterCCAMIB, cdccaPeerStatsRARDropped=cdccaPeerStatsRARDropped, cdccaPeerStatsRAADropped=cdccaPeerStatsRAADropped, cdccaPeerStatsAARDropped=cdccaPeerStatsAARDropped, cdccaPeerStorageType=cdccaPeerStorageType, cdccaHostCfgs=cdccaHostCfgs, cdccaHostIpAddrEntry=cdccaHostIpAddrEntry, cdccaHostIpAddress=cdccaHostIpAddress, cdccaPeerVendorRowStatus=cdccaPeerVendorRowStatus, cdccaPeerCfgs=cdccaPeerCfgs, ciscoDiameterCCAMIB=ciscoDiameterCCAMIB, ciscoDiameterCCAMIBConform=ciscoDiameterCCAMIBConform, cdccaHostIpAddrIndex=cdccaHostIpAddrIndex, cdccaPeerVendorTable=cdccaPeerVendorTable, cdccaPeerStatsSTRDropped=cdccaPeerStatsSTRDropped, cdccaPeerStatsASAOut=cdccaPeerStatsASAOut, ciscoDiameterCCAMIBObjects=ciscoDiameterCCAMIBObjects, cdccaPeerVendorEntry=cdccaPeerVendorEntry, cdccaPeerRowStatus=cdccaPeerRowStatus, cdccaPeerStatsSTROut=cdccaPeerStatsSTROut, cdccaPeerStatsCCRDropped=cdccaPeerStatsCCRDropped, cdccaHostIpAddrType=cdccaHostIpAddrType, ciscoDiameterCCAPeerCfgGroup=ciscoDiameterCCAPeerCfgGroup, ciscoDiameterCCAMIBNotifs=ciscoDiameterCCAMIBNotifs, cdccaPeerStats=cdccaPeerStats, cdccaPeerStatsCCRIn=cdccaPeerStatsCCRIn, cdccaPeerVendorIndex=cdccaPeerVendorIndex, ciscoDiameterCCAMIBCompliance=ciscoDiameterCCAMIBCompliance, cdccaPeerStatsRAAOut=cdccaPeerStatsRAAOut, ciscoDiameterCCAMIBGroups=ciscoDiameterCCAMIBGroups, cdccaPeerIndex=cdccaPeerIndex, cdccaPeerStatsTable=cdccaPeerStatsTable, cdccaPeerStatsEntry=cdccaPeerStatsEntry, cdccaPeerStatsCCADropped=cdccaPeerStatsCCADropped, cdccaPeerStatsAAAIn=cdccaPeerStatsAAAIn, ciscoDiameterCCAHostCfgGroup=ciscoDiameterCCAHostCfgGroup, cdccaPeerStatsASRIn=cdccaPeerStatsASRIn, cdccaPeerStatsSTAIn=cdccaPeerStatsSTAIn, cdccaPeerStatsSTADropped=cdccaPeerStatsSTADropped, ciscoDiameterCCAPeerStatsGroup=ciscoDiameterCCAPeerStatsGroup, cdccaPeerId=cdccaPeerId, cdccaPeerStatsCCROut=cdccaPeerStatsCCROut, cdccaHostIpAddrTable=cdccaHostIpAddrTable, cdccaPeerFirmwareRevision=cdccaPeerFirmwareRevision, cdccaHostId=cdccaHostId, cdccaPeerVendorId=cdccaPeerVendorId, cdccaPeerStatsASADropped=cdccaPeerStatsASADropped, cdccaPeerStatsASRDropped=cdccaPeerStatsASRDropped, cdccaPeerVendorStorageType=cdccaPeerVendorStorageType, cdccaPeerStatsCCAOut=cdccaPeerStatsCCAOut, cdccaPeerTable=cdccaPeerTable, cdccaPeerStatsRARIn=cdccaPeerStatsRARIn, cdccaPeerStatsAAROut=cdccaPeerStatsAAROut)
| 147.42623 | 2,598 | 0.788391 |
7944d6d2dc0bb5f74a1c75f0c9567b67a314e397 | 1,808 | py | Python | Day 23/RemoveRedundantNodes.py | manvi0308/IIEC-Rise | d2eb68ef60d95b2c1f5e6eb12d623a1fc20c1c99 | [
"MIT"
] | 34 | 2020-09-06T11:04:29.000Z | 2021-11-26T18:25:21.000Z | Day 23/RemoveRedundantNodes.py | manvi0308/IIEC-Rise | d2eb68ef60d95b2c1f5e6eb12d623a1fc20c1c99 | [
"MIT"
] | null | null | null | Day 23/RemoveRedundantNodes.py | manvi0308/IIEC-Rise | d2eb68ef60d95b2c1f5e6eb12d623a1fc20c1c99 | [
"MIT"
] | 10 | 2020-09-08T05:26:42.000Z | 2022-03-06T08:33:39.000Z | ''' 1) Problem Statement : Remove redundant nodes in linked list
2) Logic : Any three adjacent nodes with same x or y value will be a redundant node, if found same x or y value for a
triplet then simply remove the middle node,do so for every three nodes in succession and this way the resultant
list will be redundant free linked list
Time Complexity : O(n)'''
# Node class
class Node:
# Initializer constructor, each node wil have two values x and y
def __init__(self, x_data, y_data, next):
self.x_data = x_data
self.y_data = y_data
self.next = next
# To represent the nodes
def __repr__(self):
return str((self.x_data, self.y_data))
# A function to remove redundant nodes
def removeNodes(head):
current = head
while current.next and current.next.next:
temp = current.next.next
# Checking for a vertical triplet - Will have same x value
if current.x_data == current.next.x_data and current.x_data == temp.x_data:
# delete the middle most node
current.next = temp
# Checking for a horizontal triplet - will have same y value
if current.y_data == current.y_data and current.y_data == temp.y_data:
# delete the middle most node
current.next = temp
# If there is neither horizontal nor vertical triplet
else:
current = current.next
return head
# A utlit function to print the list
def printList(head):
ptr = head
while ptr:
print(ptr,end=' -> ')
ptr = ptr.next
print('NONE')
# Driver code
if __name__ == '__main__':
keys = [(0,1),(0,5),(0,8),(1,8),(8,8),(9,8)]
head = None
for x,y in reversed(keys):
head = Node(x_data,y_data,head)
head = removeNodes(head)
printList(head)
| 27.393939 | 122 | 0.647124 |
7944d75f9cae987ee28f226d4d3d19db36549ab6 | 7,137 | py | Python | tests/test_timing.py | ioancornea/niveristand-python | a7fd578aefa904e9eb0bab00762af0ebba21ada0 | [
"MIT"
] | 6 | 2018-07-04T10:59:43.000Z | 2022-03-24T13:34:33.000Z | tests/test_timing.py | ioancornea/niveristand-python | a7fd578aefa904e9eb0bab00762af0ebba21ada0 | [
"MIT"
] | 14 | 2018-11-05T20:05:33.000Z | 2022-03-10T12:54:58.000Z | tests/test_timing.py | ioancornea/niveristand-python | a7fd578aefa904e9eb0bab00762af0ebba21ada0 | [
"MIT"
] | 15 | 2018-07-04T07:58:49.000Z | 2022-02-22T16:35:26.000Z | import sys
from niveristand import nivs_rt_sequence
from niveristand import realtimesequencetools
from niveristand.clientapi import BooleanValue, DoubleValue, I64Value
from niveristand.clientapi import RealTimeSequence
from niveristand.errors import VeristandError
from niveristand.library import multitask, nivs_yield, seqtime, task, tickcountms, tickcountus
from niveristand.library.timing import wait, wait_until_next_ms_multiple, wait_until_next_us_multiple, \
wait_until_settled
import pytest
from testutilities import rtseqrunner, validation
@nivs_rt_sequence
def wait_nivstype():
init = DoubleValue(0)
duration = DoubleValue(1)
end = DoubleValue(0)
ret = BooleanValue(False)
init.value = seqtime()
end.value = wait(duration) - init.value
if end.value >= duration.value and end.value <= duration.value + 0.1:
ret.value = True
return ret.value
@nivs_rt_sequence
def wait_const():
init = DoubleValue(0)
end = DoubleValue(0)
ret = BooleanValue(False)
init.value = seqtime()
end.value = wait(DoubleValue(1)) - init.value
if end.value >= 1 and end.value <= 1.1:
ret.value = True
return ret.value
@nivs_rt_sequence
def wait_multitask():
ret = BooleanValue(False)
init1 = DoubleValue(0)
end1 = DoubleValue(0)
init2 = DoubleValue(0)
end2 = DoubleValue(0)
tot_init = DoubleValue(0)
tot_end = DoubleValue(0)
tot_init.value = seqtime()
with multitask() as mt:
@task(mt)
def f1():
init1.value = seqtime()
nivs_yield()
end1.value = wait(DoubleValue(1)) - init1.value
@task(mt)
def f2():
init2.value = seqtime()
end2.value = wait(DoubleValue(3)) - init2.value
tot_end.value = seqtime() - tot_init.value
ret.value = tot_end.value >= 3 and tot_end.value <= 4 and \
end1.value >= 1 and end1.value <= 2 and \
end2.value >= 3 and end2.value <= 4
return ret.value
@nivs_rt_sequence
def wait_const_negative():
init = DoubleValue(0)
end = DoubleValue(0)
ret = BooleanValue(True)
init.value = seqtime()
end.value = wait(DoubleValue(-1)) - init.value
if end.value >= 0.1 or end.value < 0:
ret.value = False
return ret.value
@nivs_rt_sequence
def _return_one():
a = DoubleValue(1)
return a.value
@nivs_rt_sequence
def wait_subseq_call():
init = DoubleValue(0)
end = DoubleValue(0)
ret = BooleanValue(False)
init.value = seqtime()
end.value = wait(_return_one()) - init.value
if end.value >= 1 and end.value <= 1.1:
ret.value = True
return ret.value
@nivs_rt_sequence
def wait_until_next_ms():
init = I64Value(0)
end = I64Value(0)
ret = BooleanValue(False)
init.value = tickcountms()
end.value = wait_until_next_ms_multiple(DoubleValue(231)) - init.value
if end.value <= 231 and end.value >= 0:
ret.value = True
return ret.value
@nivs_rt_sequence
def wait_until_next_us():
init = I64Value(0)
end = I64Value(0)
ret = BooleanValue(False)
init.value = tickcountus()
end.value = wait_until_next_us_multiple(DoubleValue(17000)) - init.value
# give this one a few us buffer because come on, no way python can do it all that fast
if end.value <= 22000 and end.value >= 0:
ret.value = True
return ret.value
@nivs_rt_sequence
def wait_until_settled_multitask():
a = DoubleValue(15000)
timer = DoubleValue(0)
ret = BooleanValue(False)
timer.value = seqtime()
with multitask() as mt:
@task(mt)
def monitor():
ret.value = wait_until_settled(a, DoubleValue(1000), DoubleValue(500), DoubleValue(2), DoubleValue(-1))
@task(mt)
def signal():
a.value = 600
wait(DoubleValue(1))
a.value = 12000
wait(DoubleValue(1))
a.value = 300
wait(DoubleValue(1))
a.value = 750
timer.value = seqtime() - timer.value
ret.value = a.value == 750 and timer.value >= 4 and timer.value <= 6 and not ret.value
return ret.value
@nivs_rt_sequence
def wait_until_settled_timeout():
pass_test = BooleanValue(False)
time = DoubleValue(0)
time.value = seqtime()
pass_test.value = wait_until_settled(DoubleValue(100),
DoubleValue(90),
DoubleValue(0),
DoubleValue(2),
DoubleValue(1))
time.value = seqtime() - time.value
pass_test.value &= time.value > 1 and time.value < 1.1
return pass_test.value
@nivs_rt_sequence
def wait_wrong_param_type():
duration = I64Value(1)
wait(duration)
return duration.value
run_tests = [
(wait_nivstype, (), True),
(wait_const, (), True),
(wait_const_negative, (), True),
(wait_subseq_call, (), True),
(wait_multitask, (), True),
(wait_until_next_us, (), True),
(wait_until_next_ms, (), True),
(wait_until_settled_multitask, (), True),
(wait_until_settled_timeout, (), True),
]
skip_tests = [
(wait, (), "Imported RT sequence that we're trying to test."),
(wait_until_next_ms_multiple, (), "Imported RT sequence that we're trying to test."),
(wait_until_next_us_multiple, (), "Imported RT sequence that we're trying to test."),
(wait_until_settled, (), "Imported RT sequence that we're trying to test."),
]
fail_transform_tests = [
(wait_wrong_param_type, (), VeristandError),
]
def idfunc(val):
try:
return val.__name__
except AttributeError:
return str(val)
@pytest.mark.parametrize("func_name, params, expected_result", run_tests, ids=idfunc)
def test_transform(func_name, params, expected_result):
RealTimeSequence(func_name)
@pytest.mark.parametrize("func_name, params, expected_result", run_tests, ids=idfunc)
def test_runpy(func_name, params, expected_result):
actual = func_name(*params)
assert actual == expected_result
@pytest.mark.parametrize("func_name, params, expected_result", run_tests, ids=idfunc)
def test_run_py_as_rts(func_name, params, expected_result):
actual = realtimesequencetools.run_py_as_rtseq(func_name)
assert actual == expected_result
@pytest.mark.parametrize("func_name, params, expected_result", run_tests, ids=idfunc)
def test_run_in_VM(func_name, params, expected_result):
actual = rtseqrunner.run_rtseq_in_VM(func_name, deltat=0.01)
assert actual == expected_result
@pytest.mark.parametrize("func_name, params, expected_result", fail_transform_tests, ids=idfunc)
def test_failures(func_name, params, expected_result):
with pytest.raises(expected_result):
RealTimeSequence(func_name)
with pytest.raises(expected_result):
func_name(*params)
@pytest.mark.parametrize("func_name, params, reason", skip_tests, ids=idfunc)
def test_skipped(func_name, params, reason):
pytest.skip(func_name.__name__ + ": " + reason)
def test_check_all_tested():
validation.test_validate(sys.modules[__name__])
| 28.894737 | 115 | 0.665125 |
7944d7994991b00501d919fea0a11616998b8605 | 8,786 | py | Python | src/track.py | zubovskiii98/FairMOT | 618b47da278a7c580522739239649503e662aad4 | [
"MIT"
] | null | null | null | src/track.py | zubovskiii98/FairMOT | 618b47da278a7c580522739239649503e662aad4 | [
"MIT"
] | null | null | null | src/track.py | zubovskiii98/FairMOT | 618b47da278a7c580522739239649503e662aad4 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import os.path as osp
import cv2
import logging
import argparse
import motmetrics as mm
import numpy as np
import torch
from tracker.multitracker import JDETracker
from tracking_utils import visualization as vis
from tracking_utils.log import logger
from tracking_utils.timer import Timer
from tracking_utils.evaluation import Evaluator
import datasets.dataset.jde as datasets
from tracking_utils.utils import mkdir_if_missing
from opts import opts
def write_results(filename, results, data_type):
if data_type == 'mot':
save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
elif data_type == 'kitti':
save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
else:
raise ValueError(data_type)
with open(filename, 'w') as f:
for frame_id, tlwhs, track_ids in results:
if data_type == 'kitti':
frame_id -= 1
for tlwh, track_id in zip(tlwhs, track_ids):
if track_id < 0:
continue
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)
f.write(line)
logger.info('save results to {}'.format(filename))
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30):
if save_dir:
mkdir_if_missing(save_dir)
tracker = JDETracker(opt, frame_rate=frame_rate)
timer = Timer()
results = []
frame_id = 0
for path, img, img0 in dataloader:
if frame_id % 20 == 0:
logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
# run tracking
timer.tic()
blob = torch.from_numpy(img).cuda().unsqueeze(0)
online_targets = tracker.update(blob, img0)
online_tlwhs = []
online_ids = []
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
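            # drop boxes that are wider than tall (w/h > 1.6) or below the minimum area;
            # for pedestrian tracking these are almost always false positives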
vertical = tlwh[2] / tlwh[3] > 1.6
if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
online_tlwhs.append(tlwh)
online_ids.append(tid)
timer.toc()
# save results
results.append((frame_id + 1, online_tlwhs, online_ids))
if show_image or save_dir is not None:
online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
fps=1. / timer.average_time)
if show_image:
cv2.imshow('online_im', online_im)
if save_dir is not None:
cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
frame_id += 1
# save results
write_results(result_filename, results, data_type)
return frame_id, timer.average_time, timer.calls
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
save_images=False, save_videos=False, show_image=True):
print('OPT: {}'.format(opt))
logger.setLevel(logging.INFO)
result_root = os.path.join('..', 'results', exp_name)
print (result_root)
mkdir_if_missing(result_root)
data_type = 'mot'
# run tracking
accs = []
n_frame = 0
timer_avgs, timer_calls = [], []
for seq in seqs:
output_dir = os.path.join(data_root, '..', 'outputs', exp_name, seq) if save_images or save_videos else None
logger.info('start seq: {}'.format(seq))
dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
result_filename = os.path.join(result_root, '{}.txt'.format(seq))
meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
frame_rate = int(meta_info[meta_info.find('frameRate') + 10:meta_info.find('\nseqLength')])
nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
n_frame += nf
timer_avgs.append(ta)
timer_calls.append(tc)
# eval
logger.info('Evaluate seq: {}'.format(seq))
evaluator = Evaluator(data_root, seq, data_type)
accs.append(evaluator.eval_file(result_filename))
if save_videos:
output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
os.system(cmd_str)
timer_avgs = np.asarray(timer_avgs)
timer_calls = np.asarray(timer_calls)
all_time = np.dot(timer_avgs, timer_calls)
avg_time = all_time / np.sum(timer_calls)
logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))
# get summary
metrics = mm.metrics.motchallenge_metrics
mh = mm.metrics.create()
summary = Evaluator.get_summary(accs, seqs, metrics)
strsummary = mm.io.render_summary(
summary,
formatters=mh.formatters,
namemap=mm.io.motchallenge_metric_names
)
print(strsummary)
Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
opt = opts().init()
print('Opt dir {}'.format(opt))
if not opt.val_mot16:
seqs_str = '''KITTI-13
KITTI-17
ADL-Rundle-6
PETS09-S2L1
TUD-Campus
TUD-Stadtmitte'''
data_root = os.path.join(opt.data_dir, 'MOT15/images/train')
else:
seqs_str = '''MOT16-02
MOT16-04
MOT16-05
MOT16-09
MOT16-10
MOT16-11
MOT16-13'''
data_root = os.path.join(opt.data_dir, 'MOT16/train')
if opt.test_mot16:
seqs_str = '''MOT16-01
MOT16-03
MOT16-06
MOT16-07
MOT16-08
MOT16-12
MOT16-14'''
data_root = os.path.join(opt.data_dir, 'MOT16/test')
if opt.test_mot15:
seqs_str = '''ADL-Rundle-1
ADL-Rundle-3
AVG-TownCentre
ETH-Crossing
ETH-Jelmoli
ETH-Linthescher
KITTI-16
KITTI-19
PETS09-S2L2
TUD-Crossing
Venice-1'''
data_root = os.path.join(opt.data_dir, 'MOT15/images/test')
if opt.test_mot17:
seqs_str = '''MOT17-01-SDP
MOT17-03-SDP
MOT17-06-SDP
MOT17-07-SDP
MOT17-08-SDP
MOT17-12-SDP
MOT17-14-SDP'''
data_root = os.path.join(opt.data_dir, 'MOT17/images/test')
if opt.val_mot17:
seqs_str = '''MOT17-02-SDP
MOT17-04-SDP
MOT17-05-SDP
MOT17-09-SDP
MOT17-10-SDP
MOT17-11-SDP
MOT17-13-SDP'''
data_root = os.path.join(opt.data_dir, 'MOT17/images/train')
if opt.val_mot15:
seqs_str = '''KITTI-13
KITTI-17
ETH-Bahnhof
ETH-Sunnyday
PETS09-S2L1
TUD-Campus
TUD-Stadtmitte
ADL-Rundle-6
ADL-Rundle-8
ETH-Pedcross2
TUD-Stadtmitte'''
data_root = os.path.join(opt.data_dir, 'MOT15/images/train')
if opt.val_mot20:
seqs_str = '''MOT20-01
MOT20-02
MOT20-03
MOT20-05
'''
data_root = os.path.join(opt.data_dir, 'MOT20/images/train')
if opt.test_mot20:
seqs_str = '''MOT20-04
MOT20-06
MOT20-07
MOT20-08
'''
data_root = os.path.join(opt.data_dir, 'MOT20/images/test')
seqs = [seq.strip() for seq in seqs_str.split()]
main(opt,
data_root=data_root,
seqs=seqs,
exp_name='MOT15_val_all_dla34',
show_image=False,
save_images=False,
save_videos=False)
| 36.608333 | 116 | 0.548486 |
7944d7e604022d8ea036c41c390939da200cfd87 | 9,584 | py | Python | src/pylhe/__init__.py | jhgoh/pylhe | 436176d8146800aa5dbe79fdf30903aa711d3a55 | [
"Apache-2.0"
] | 11 | 2020-04-29T08:32:01.000Z | 2021-08-29T05:14:34.000Z | src/pylhe/__init__.py | jhgoh/pylhe | 436176d8146800aa5dbe79fdf30903aa711d3a55 | [
"Apache-2.0"
] | 86 | 2020-04-29T08:13:21.000Z | 2022-03-29T04:20:22.000Z | src/pylhe/__init__.py | jhgoh/pylhe | 436176d8146800aa5dbe79fdf30903aa711d3a55 | [
"Apache-2.0"
] | 11 | 2020-05-13T09:40:06.000Z | 2021-10-31T03:45:54.000Z | import gzip
import os
import subprocess
import xml.etree.ElementTree as ET
import networkx as nx
import tex2pix
from particle.converters.bimap import DirectionalMaps
from ._version import version as __version__
from .awkward import register_awkward, to_awkward
__all__ = [
"__version__",
"LHEEvent",
"LHEEventInfo",
"LHEFile",
"LHEInit",
"LHEParticle",
"LHEProcInfo",
"loads",
"read_lhe",
"read_lhe_init",
"read_lhe_with_attributes",
"read_num_events",
"register_awkward",
"to_awkward",
"visualize",
]
# Python 3.7+
def __dir__():
return __all__
class LHEFile:
def __init__(self):
pass
class LHEEvent:
def __init__(
self, eventinfo, particles, weights=None, attributes=None, optional=None
):
self.eventinfo = eventinfo
self.particles = particles
self.weights = weights
self.attributes = attributes
self.optional = optional
for p in self.particles:
p.event = self
def visualize(self, outputname):
visualize(self, outputname)
class LHEEventInfo:
fieldnames = ["nparticles", "pid", "weight", "scale", "aqed", "aqcd"]
def __init__(self, **kwargs):
if set(kwargs.keys()) != set(self.fieldnames):
raise RuntimeError
for k, v in kwargs.items():
setattr(self, k, v)
@classmethod
def fromstring(cls, string):
return cls(**dict(zip(cls.fieldnames, map(float, string.split()))))
class LHEParticle:
fieldnames = [
"id",
"status",
"mother1",
"mother2",
"color1",
"color2",
"px",
"py",
"pz",
"e",
"m",
"lifetime",
"spin",
]
def __init__(self, **kwargs):
if set(kwargs.keys()) != set(self.fieldnames):
raise RuntimeError
for k, v in kwargs.items():
setattr(self, k, v)
@classmethod
def fromstring(cls, string):
return cls(**dict(zip(cls.fieldnames, map(float, string.split()))))
def mothers(self):
first_idx = int(self.mother1) - 1
second_idx = int(self.mother2) - 1
return [
self.event.particles[idx] for idx in {first_idx, second_idx} if idx >= 0
]
class LHEInit(dict):
"""Store the <init> block as dict."""
fieldnames = [
"beamA",
"beamB",
"energyA",
"energyB",
"PDFgroupA",
"PDFgroupB",
"PDFsetA",
"PDFsetB",
"weightingStrategy",
"numProcesses",
]
def __init__(self):
pass
@classmethod
def fromstring(cls, string):
return dict(zip(cls.fieldnames, map(float, string.split())))
class LHEProcInfo(dict):
"""Store the process info block as dict."""
fieldnames = ["xSection", "error", "unitWeight", "procId"]
def __init__(self):
pass
@classmethod
def fromstring(cls, string):
return dict(zip(cls.fieldnames, map(float, string.split())))
def loads():
pass
def _extract_fileobj(filepath):
"""
Checks to see if a file is compressed, and if so, extract it with gzip
so that the uncompressed file can be returned.
It returns a file object containing XML data that will be ingested by
``xml.etree.ElementTree.iterparse``.
Args:
filepath: A path-like object or str.
Returns:
_io.BufferedReader or gzip.GzipFile: A file object containing XML data.
"""
with open(filepath, "rb") as gzip_file:
header = gzip_file.read(2)
gzip_magic_number = b"\x1f\x8b"
return (
gzip.GzipFile(filepath) if header == gzip_magic_number else open(filepath, "rb")
)
def read_lhe_init(filepath):
"""
Read and return the init blocks. This encodes the weight group
and related things according to https://arxiv.org/abs/1405.1067
Args:
filepath: A path-like object or str.
Returns:
dict: Dictionary containing the init blocks of the LHE file.
"""
initDict = {}
with _extract_fileobj(filepath) as fileobj:
for event, element in ET.iterparse(fileobj, events=["end"]):
if element.tag == "init":
data = element.text.split("\n")[1:-1]
initDict["initInfo"] = LHEInit.fromstring(data[0])
initDict["procInfo"] = [LHEProcInfo.fromstring(d) for d in data[1:]]
if element.tag == "initrwgt":
initDict["weightgroup"] = {}
for child in element:
# Find all weightgroups
if child.tag == "weightgroup" and child.attrib != {}:
try:
wg_type = child.attrib["type"]
except KeyError:
print("weightgroup must have attribute 'type'")
raise
_temp = {"attrib": child.attrib, "weights": {}}
# Iterate over all weights in this weightgroup
for w in child:
if w.tag != "weight":
continue
try:
wg_id = w.attrib["id"]
except KeyError:
print("weight must have attribute 'id'")
raise
_temp["weights"][wg_id] = {
"attrib": w.attrib,
"name": w.text.strip(),
}
initDict["weightgroup"][wg_type] = _temp
if element.tag == "event":
break
return initDict
def read_lhe(filepath):
try:
with _extract_fileobj(filepath) as fileobj:
for event, element in ET.iterparse(fileobj, events=["end"]):
if element.tag == "event":
data = element.text.split("\n")[1:-1]
eventdata, particles = data[0], data[1:]
eventinfo = LHEEventInfo.fromstring(eventdata)
particle_objs = [LHEParticle.fromstring(p) for p in particles]
yield LHEEvent(eventinfo, particle_objs)
except ET.ParseError as excep:
print("WARNING. Parse Error:", excep)
return
def read_lhe_with_attributes(filepath):
"""
Iterate through file, similar to read_lhe but also set
weights and attributes.
"""
try:
with _extract_fileobj(filepath) as fileobj:
for event, element in ET.iterparse(fileobj, events=["end"]):
if element.tag == "event":
eventdict = {}
data = element.text.split("\n")[1:-1]
eventdata, particles = data[0], data[1:]
eventdict["eventinfo"] = LHEEventInfo.fromstring(eventdata)
eventdict["particles"] = []
eventdict["weights"] = {}
eventdict["attrib"] = element.attrib
eventdict["optional"] = []
for p in particles:
if not p.strip().startswith("#"):
eventdict["particles"] += [LHEParticle.fromstring(p)]
else:
eventdict["optional"].append(p.strip())
for sub in element:
if sub.tag == "rwgt":
for r in sub:
if r.tag == "wgt":
eventdict["weights"][r.attrib["id"]] = float(
r.text.strip()
)
# yield eventdict
yield LHEEvent(
eventdict["eventinfo"],
eventdict["particles"],
eventdict["weights"],
eventdict["attrib"],
eventdict["optional"],
)
except ET.ParseError:
print("WARNING. Parse Error.")
return
def read_num_events(filepath):
"""
Moderately efficient way to get the number of events stored in file.
"""
with _extract_fileobj(filepath) as fileobj:
return sum(
element.tag == "event"
for event, element in ET.iterparse(fileobj, events=["end"])
)
def visualize(event, outputname):
"""
Create a PDF with a visualisation of the LHE event record as a directed graph
"""
# retrieve mapping of PDG ID to particle name as LaTeX string
_PDGID2LaTeXNameMap, _ = DirectionalMaps(
"PDGID", "LATEXNAME", converters=(int, str)
)
# draw graph
g = nx.DiGraph()
for i, p in enumerate(event.particles):
g.add_node(i, attr_dict=p.__dict__)
try:
iid = int(p.id)
name = _PDGID2LaTeXNameMap[iid]
texlbl = f"${name}$"
except KeyError:
texlbl = str(int(p.id))
g.nodes[i].update(texlbl=texlbl)
for i, p in enumerate(event.particles):
for mom in p.mothers():
g.add_edge(event.particles.index(mom), i)
nx.nx_pydot.write_dot(g, "event.dot")
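    # NOTE: the steps below shell out to external tools; `dot2tex` and `pdfcrop`
    # must be available on the PATH for the PDF rendering to succeed.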
p = subprocess.Popen(["dot2tex", "event.dot"], stdout=subprocess.PIPE)
tex = p.stdout.read().decode()
tex2pix.Renderer(tex).mkpdf(outputname)
subprocess.check_call(["pdfcrop", outputname, outputname])
os.remove("event.dot")
| 30.233438 | 88 | 0.527755 |
7944d8c2a0be1b893f853c1269963516b68fc67f | 2,371 | py | Python | DoublyLinkedList.py | Shubhammehta2012/Linked-List | 808c9ecbf85c2dc68a48c554a349257dfdd6572a | [
"Apache-2.0"
] | null | null | null | DoublyLinkedList.py | Shubhammehta2012/Linked-List | 808c9ecbf85c2dc68a48c554a349257dfdd6572a | [
"Apache-2.0"
] | null | null | null | DoublyLinkedList.py | Shubhammehta2012/Linked-List | 808c9ecbf85c2dc68a48c554a349257dfdd6572a | [
"Apache-2.0"
] | null | null | null | class Node(object):
# Each node has its data and a pointer that points to next node in the Linked List
    def __init__(self, data, next=None, previous=None):
        self.data = data
        self.next = next
        self.previous = previous
class DoublyLinkedList(object):
def __init__(self):
self.head = None
# for inserting at beginning of linked list
def insertAtStart(self, data):
if self.head == None:
newNode = Node(data)
self.head = newNode
else:
newNode = Node(data)
self.head.previous = newNode
newNode.next = self.head
self.head = newNode
# for inserting at end of linked list
    def insertAtEnd(self, data):
        newNode = Node(data)
        # empty list: the new node simply becomes the head
        if self.head is None:
            self.head = newNode
            return
        temp = self.head
        while(temp.next != None):
            temp = temp.next
        temp.next = newNode
        newNode.previous = temp
# deleting a node from linked list
    def delete(self, data):
        temp = self.head
        # empty list: nothing to delete
        if temp is None:
            return
        # if head node is to be deleted (also covers a single-node list)
        if temp.data == data:
            self.head = temp.next
            if temp.next is not None:
                temp.next.previous = None
            temp.next = None
            return
        # walk forward until the matching node or the tail is reached
        while temp.next is not None:
            if temp.data == data:
                break
            temp = temp.next
        if temp.data != data:
            # element not found in the list
            return
        if temp.next:
            # if element to be deleted is in between
            temp.previous.next = temp.next
            temp.next.previous = temp.previous
            temp.next = None
            temp.previous = None
        else:
            # if element to be deleted is the last element
            temp.previous.next = None
            temp.previous = None
# for printing the contents of linked lists
def printdll(self):
temp = self.head
while(temp != None):
print(temp.data, end=' ')
temp = temp.next
if __name__ == '__main__':
dll = DoublyLinkedList()
dll.insertAtStart(1)
dll.insertAtStart(2)
dll.insertAtEnd(3)
dll.insertAtStart(4)
dll.printdll()
dll.delete(2)
print()
dll.printdll()
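    # Added sketch: because every node also keeps a `previous` link, the same
    # list can be walked backwards from the tail.
    print()
    tail = dll.head
    while tail.next is not None:
        tail = tail.next
    while tail is not None:
        print(tail.data, end=' ')
        tail = tail.previous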
| 30.012658 | 86 | 0.510755 |
7944d8e5687b2093e6d6f29ff6cafd339db60bdb | 591 | py | Python | codertheory/utils/sftpstorage.py | codertheory/backend | 66017e4f484414b878d2a6e78fa623870aa38cfb | [
"MIT"
] | null | null | null | codertheory/utils/sftpstorage.py | codertheory/backend | 66017e4f484414b878d2a6e78fa623870aa38cfb | [
"MIT"
] | null | null | null | codertheory/utils/sftpstorage.py | codertheory/backend | 66017e4f484414b878d2a6e78fa623870aa38cfb | [
"MIT"
] | null | null | null | import io
from django.utils.deconstruct import deconstructible
from storages.backends import sftpstorage
@deconstructible
class CustomSFTPStorage(sftpstorage.SFTPStorage):
def _open(self, name, mode='rb'):
return CustomSFTPStorageFile(name, self, mode)
class CustomSFTPStorageFile(sftpstorage.SFTPStorageFile):
def write(self, content):
if 'w' not in self.mode and "a" not in self.mode:
raise AttributeError("File was opened for read-only access.")
self.file = io.BytesIO(content)
self._is_dirty = True
self._is_read = True
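# Usage sketch (Django settings; host and root values are illustrative assumptions):
# DEFAULT_FILE_STORAGE = "codertheory.utils.sftpstorage.CustomSFTPStorage"
# SFTP_STORAGE_HOST = "sftp.example.com"
# SFTP_STORAGE_ROOT = "/uploads/"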
| 26.863636 | 73 | 0.714044 |
7944da3322d338ce43706a807a115941e37b7afc | 83 | py | Python | 13. Modules - Lab/03_triangle.py | elenaborisova/Python-Advanced | 4c266d81f294372c3599741e8ba53f59fdc834c5 | [
"MIT"
] | 2 | 2021-04-04T06:26:13.000Z | 2022-02-18T22:21:49.000Z | 13. Modules - Lab/03_triangle.py | elenaborisova/Python-Advanced | 4c266d81f294372c3599741e8ba53f59fdc834c5 | [
"MIT"
] | null | null | null | 13. Modules - Lab/03_triangle.py | elenaborisova/Python-Advanced | 4c266d81f294372c3599741e8ba53f59fdc834c5 | [
"MIT"
] | 3 | 2021-02-01T12:32:03.000Z | 2021-04-12T13:45:20.000Z | from triangle_print import print_triangle
size = int(input())
print_triangle(size) | 20.75 | 41 | 0.819277 |
7944daaa1fd945359835a88bdcd278ad38283bdd | 1,332 | py | Python | interface.py | NilanjanDaw/Ground-based-Control-and-Remote-Stabilization-for-MAV | 86fe2e73d43f8384b42931cedbc5d7dc7ef90e67 | [
"MIT"
] | null | null | null | interface.py | NilanjanDaw/Ground-based-Control-and-Remote-Stabilization-for-MAV | 86fe2e73d43f8384b42931cedbc5d7dc7ef90e67 | [
"MIT"
] | null | null | null | interface.py | NilanjanDaw/Ground-based-Control-and-Remote-Stabilization-for-MAV | 86fe2e73d43f8384b42931cedbc5d7dc7ef90e67 | [
"MIT"
] | null | null | null | from drone_controller import Drone
import time, os, sys
from threading import Thread
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/lib")
import cflib
from cflib.crazyflie import Crazyflie
"""
Initializer class to establish drone-radio data link
"""
class interfacer:
def _controller(self):
cflib.crtp.init_drivers(enable_debug_driver=False)
# Scan for Crazyflies and use the first one found
print "Scanning interfaces for Crazyflies..."
available = cflib.crtp.scan_interfaces()
print "Crazyflies found:"
print available
for i in available:
print i[0]
if len(available) > 0:
print len(available) - 1
# print(available[0][len(available) - 1])
drone = Drone(available[len(available) - 1][0])
while not drone.connected:
time.sleep(0.5)
print "Test Connection Done!!!"
            Thread(target=drone._motor_control).start()
Drone.thruster = 0
print(drone.thruster)
time.sleep(1)
Drone.thruster = 1
print(drone.thruster)
time.sleep(1)
Drone.thruster = -1
print(drone.thruster)
time.sleep(0.5)
else:
print "No drones found!!!!" | 30.976744 | 68 | 0.590841 |
7944dc33c672b9b7d3629eae8c0054e065617914 | 9,441 | py | Python | gybe/kubernetes/io/k8s/api/extensions/v1beta1.py | peterth3/gybe | c6219e45a0c931ddb85da41607c9246407a68fd1 | [
"MIT"
] | 2 | 2021-07-12T12:59:08.000Z | 2021-10-07T18:01:58.000Z | gybe/kubernetes/io/k8s/api/extensions/v1beta1.py | peterth3/gybe | c6219e45a0c931ddb85da41607c9246407a68fd1 | [
"MIT"
] | null | null | null | gybe/kubernetes/io/k8s/api/extensions/v1beta1.py | peterth3/gybe | c6219e45a0c931ddb85da41607c9246407a68fd1 | [
"MIT"
] | null | null | null | # generated by datamodel-codegen:
# filename: _definitions.json
# timestamp: 2021-01-20T05:35:16+00:00
from __future__ import annotations
from typing import List, Optional
from pydantic import BaseModel, Field
from ..... import Kind92, Kind93
from ...apimachinery.pkg.apis.meta import v1 as v1_1
from ...apimachinery.pkg.util import intstr
from ..core import v1
class IngressTLS(BaseModel):
hosts: Optional[List[str]] = Field(
None,
description='Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.',
)
secretName: Optional[str] = Field(
None,
description='SecretName is the name of the secret used to terminate SSL traffic on 443. Field is left optional to allow SSL routing based on SNI hostname alone. If the SNI host in a listener conflicts with the "Host" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.',
)
class IngressBackend(BaseModel):
resource: Optional[v1.TypedLocalObjectReference] = Field(
None,
description='Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.',
)
serviceName: Optional[str] = Field(
None, description='Specifies the name of the referenced service.'
)
servicePort: Optional[intstr.IntOrString] = Field(
None, description='Specifies the port of the referenced service.'
)
class IngressStatus(BaseModel):
loadBalancer: Optional[v1.LoadBalancerStatus] = Field(
None,
description='LoadBalancer contains the current status of the load-balancer.',
)
class HTTPIngressPath(BaseModel):
backend: IngressBackend = Field(
...,
description='Backend defines the referenced service endpoint to which the traffic will be forwarded to.',
)
path: Optional[str] = Field(
None,
description='Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional "path" part of a URL as defined by RFC 3986. Paths must begin with a \'/\'. When unspecified, all paths from incoming requests are matched.',
)
pathType: Optional[str] = Field(
None,
description="PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.",
)
class HTTPIngressRuleValue(BaseModel):
paths: List[HTTPIngressPath] = Field(
..., description='A collection of paths that map requests to backends.'
)
class IngressRule(BaseModel):
host: Optional[str] = Field(
None,
description='Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the "host" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be "precise" which is a domain name without the terminating dot of a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name prefixed with a single wildcard label (e.g. "*.foo.com"). The wildcard character \'*\' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.',
)
http: Optional[HTTPIngressRuleValue] = None
class IngressSpec(BaseModel):
backend: Optional[IngressBackend] = Field(
None,
description="A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.",
)
ingressClassName: Optional[str] = Field(
None,
description='IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.',
)
rules: Optional[List[IngressRule]] = Field(
None,
description='A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.',
)
tls: Optional[List[IngressTLS]] = Field(
None,
description='TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.',
)
class Ingress(BaseModel):
apiVersion: Optional[str] = Field(
None,
description='APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources',
)
kind: Optional[Kind92] = Field(
None,
description='Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds',
)
metadata: Optional[v1_1.ObjectMeta] = Field(
None,
description="Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
)
spec: Optional[IngressSpec] = Field(
None,
description='Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status',
)
status: Optional[IngressStatus] = Field(
None,
description='Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status',
)
class IngressList(BaseModel):
apiVersion: Optional[str] = Field(
None,
description='APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources',
)
items: List[Ingress] = Field(..., description='Items is the list of Ingress.')
kind: Optional[Kind93] = Field(
None,
description='Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds',
)
metadata: Optional[v1_1.ListMeta] = Field(
None,
description="Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
)
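# Construction sketch for the generated models above; the host, path and service
# names are illustrative assumptions, not defaults of this package.
def _example_ingress_spec() -> IngressSpec:
    backend = IngressBackend(serviceName="web")
    path = HTTPIngressPath(backend=backend, path="/", pathType="Prefix")
    rule = IngressRule(host="example.com", http=HTTPIngressRuleValue(paths=[path]))
    return IngressSpec(rules=[rule])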
| 70.984962 | 1,389 | 0.741659 |
7944dc358c8cae948817895f0bc059aa83a23e1f | 345 | py | Python | backend/cli.py | etalab/suivi-ouverture | 0803fe1b522a778d0b3368ca6b8017b2fdbcc0e0 | [
"MIT"
] | 2 | 2021-06-07T13:01:44.000Z | 2022-03-28T14:42:32.000Z | backend/cli.py | etalab/economiecirculaire | fcce071bb9e028b74ec5255857180165a2241dee | [
"MIT"
] | 18 | 2019-11-07T09:36:55.000Z | 2022-02-27T08:00:28.000Z | backend/cli.py | etalab/suivi-ouverture | 0803fe1b522a778d0b3368ca6b8017b2fdbcc0e0 | [
"MIT"
] | 3 | 2019-11-06T15:58:07.000Z | 2021-06-11T08:16:16.000Z | import click
from app import app, CACHE_CONFIG
from flask_caching import Cache
cache = Cache()
@click.group()
def cli():
pass
@cli.command()
def flush_cache():
"""Flush cache"""
cache.init_app(app, config=CACHE_CONFIG)
with app.app_context():
cache.clear()
print('Done.')
if __name__ == "__main__":
cli()
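# Shell usage sketch (with click >= 7 the command is exposed as "flush-cache"):
#   python cli.py flush-cache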
| 13.8 | 44 | 0.646377 |
7944dceebbcb826ba9c50a7f7916bbeca6b14436 | 272 | py | Python | examples/where.py | nahid/py-jsonq | badfde55ab369e8daf1dcb36dfe782d808afe6a3 | [
"MIT"
] | null | null | null | examples/where.py | nahid/py-jsonq | badfde55ab369e8daf1dcb36dfe782d808afe6a3 | [
"MIT"
] | null | null | null | examples/where.py | nahid/py-jsonq | badfde55ab369e8daf1dcb36dfe782d808afe6a3 | [
"MIT"
] | 1 | 2019-09-20T01:27:33.000Z | 2019-09-20T01:27:33.000Z | from pyjsonq.query import JsonQ
e1 = JsonQ("./data.json").at("users").where("id", ">", 3).where("location", "=", "Barisal").get()
print("result", e1)
e2 = JsonQ("./data.json").at("users").where("id", ">", 3).where("location", "=", "Barisal").get()
print("result", e2)
| 27.2 | 97 | 0.584559 |
7944dd19baf829f03e783e2febe0e4fc7cd87e7b | 709 | py | Python | sandbox/demo_interactive.py | omars11/ompc | 459446df9bf15946b8a16b517151a30c9aff6a33 | [
"BSD-3-Clause"
] | 11 | 2019-12-15T23:50:46.000Z | 2022-01-29T14:27:52.000Z | sandbox/demo_interactive.py | omars11/ompc | 459446df9bf15946b8a16b517151a30c9aff6a33 | [
"BSD-3-Clause"
] | null | null | null | sandbox/demo_interactive.py | omars11/ompc | 459446df9bf15946b8a16b517151a30c9aff6a33 | [
"BSD-3-Clause"
] | 5 | 2019-08-22T16:47:51.000Z | 2021-12-06T07:14:32.000Z | ########################333
# from IPyhton/Shell.py
def get_tk():
try: import Tkinter
except ImportError: return None
else:
hijack_tk()
r = Tkinter.Tk()
r.withdraw()
return r
def hijack_tk():
def misc_mainloop(self, n=0): pass
def tkinter_mainloop(n=0): pass
import Tkinter
Tkinter.Misc.mainloop = misc_mainloop
#Tkinter.mainloop = tkinter_mainloop
_tk = get_tk()
import matplotlib
matplotlib.interactive(True)
def _init_test():
exec execfile('test.py') in globals(), globals()
from threading import Thread
t = Thread(None, _init_test)
t.start()
_tk.mainloop()
print 'dsfds2'
t.join()
print 'dsfds'
| 20.852941 | 53 | 0.616361 |
7944dd7dc2767b54f00859ee069e575c42a56633 | 10,580 | py | Python | sdk/python/pulumi_azure_native/relay/hybrid_connection_authorization_rule.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/relay/hybrid_connection_authorization_rule.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/relay/hybrid_connection_authorization_rule.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = ['HybridConnectionAuthorizationRuleArgs', 'HybridConnectionAuthorizationRule']
@pulumi.input_type
class HybridConnectionAuthorizationRuleArgs:
def __init__(__self__, *,
hybrid_connection_name: pulumi.Input[str],
namespace_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
rights: pulumi.Input[Sequence[pulumi.Input['AccessRights']]],
authorization_rule_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a HybridConnectionAuthorizationRule resource.
:param pulumi.Input[str] hybrid_connection_name: The hybrid connection name.
:param pulumi.Input[str] namespace_name: The namespace name
:param pulumi.Input[str] resource_group_name: Name of the Resource group within the Azure subscription.
:param pulumi.Input[Sequence[pulumi.Input['AccessRights']]] rights: The rights associated with the rule.
:param pulumi.Input[str] authorization_rule_name: The authorization rule name.
"""
pulumi.set(__self__, "hybrid_connection_name", hybrid_connection_name)
pulumi.set(__self__, "namespace_name", namespace_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "rights", rights)
if authorization_rule_name is not None:
pulumi.set(__self__, "authorization_rule_name", authorization_rule_name)
@property
@pulumi.getter(name="hybridConnectionName")
def hybrid_connection_name(self) -> pulumi.Input[str]:
"""
The hybrid connection name.
"""
return pulumi.get(self, "hybrid_connection_name")
@hybrid_connection_name.setter
def hybrid_connection_name(self, value: pulumi.Input[str]):
pulumi.set(self, "hybrid_connection_name", value)
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> pulumi.Input[str]:
"""
The namespace name
"""
return pulumi.get(self, "namespace_name")
@namespace_name.setter
def namespace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the Resource group within the Azure subscription.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def rights(self) -> pulumi.Input[Sequence[pulumi.Input['AccessRights']]]:
"""
The rights associated with the rule.
"""
return pulumi.get(self, "rights")
@rights.setter
def rights(self, value: pulumi.Input[Sequence[pulumi.Input['AccessRights']]]):
pulumi.set(self, "rights", value)
@property
@pulumi.getter(name="authorizationRuleName")
def authorization_rule_name(self) -> Optional[pulumi.Input[str]]:
"""
The authorization rule name.
"""
return pulumi.get(self, "authorization_rule_name")
@authorization_rule_name.setter
def authorization_rule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authorization_rule_name", value)
class HybridConnectionAuthorizationRule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorization_rule_name: Optional[pulumi.Input[str]] = None,
hybrid_connection_name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rights: Optional[pulumi.Input[Sequence[pulumi.Input['AccessRights']]]] = None,
__props__=None):
"""
Description of a namespace authorization rule.
API Version: 2017-04-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authorization_rule_name: The authorization rule name.
:param pulumi.Input[str] hybrid_connection_name: The hybrid connection name.
:param pulumi.Input[str] namespace_name: The namespace name
:param pulumi.Input[str] resource_group_name: Name of the Resource group within the Azure subscription.
:param pulumi.Input[Sequence[pulumi.Input['AccessRights']]] rights: The rights associated with the rule.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: HybridConnectionAuthorizationRuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Description of a namespace authorization rule.
API Version: 2017-04-01.
:param str resource_name: The name of the resource.
:param HybridConnectionAuthorizationRuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(HybridConnectionAuthorizationRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorization_rule_name: Optional[pulumi.Input[str]] = None,
hybrid_connection_name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rights: Optional[pulumi.Input[Sequence[pulumi.Input['AccessRights']]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = HybridConnectionAuthorizationRuleArgs.__new__(HybridConnectionAuthorizationRuleArgs)
__props__.__dict__["authorization_rule_name"] = authorization_rule_name
if hybrid_connection_name is None and not opts.urn:
raise TypeError("Missing required property 'hybrid_connection_name'")
__props__.__dict__["hybrid_connection_name"] = hybrid_connection_name
if namespace_name is None and not opts.urn:
raise TypeError("Missing required property 'namespace_name'")
__props__.__dict__["namespace_name"] = namespace_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if rights is None and not opts.urn:
raise TypeError("Missing required property 'rights'")
__props__.__dict__["rights"] = rights
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:relay:HybridConnectionAuthorizationRule"), pulumi.Alias(type_="azure-native:relay/v20160701:HybridConnectionAuthorizationRule"), pulumi.Alias(type_="azure-nextgen:relay/v20160701:HybridConnectionAuthorizationRule"), pulumi.Alias(type_="azure-native:relay/v20170401:HybridConnectionAuthorizationRule"), pulumi.Alias(type_="azure-nextgen:relay/v20170401:HybridConnectionAuthorizationRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(HybridConnectionAuthorizationRule, __self__).__init__(
'azure-native:relay:HybridConnectionAuthorizationRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'HybridConnectionAuthorizationRule':
"""
Get an existing HybridConnectionAuthorizationRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = HybridConnectionAuthorizationRuleArgs.__new__(HybridConnectionAuthorizationRuleArgs)
__props__.__dict__["name"] = None
__props__.__dict__["rights"] = None
__props__.__dict__["type"] = None
return HybridConnectionAuthorizationRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def rights(self) -> pulumi.Output[Sequence[str]]:
"""
The rights associated with the rule.
"""
return pulumi.get(self, "rights")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
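# Usage sketch in a Pulumi program (resource, group and namespace names are
# placeholders, not values from any real deployment):
# rule = HybridConnectionAuthorizationRule(
#     "listen-send-rule",
#     resource_group_name="example-rg",
#     namespace_name="example-relay-ns",
#     hybrid_connection_name="example-hc",
#     rights=[AccessRights.LISTEN, AccessRights.SEND],
# )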
| 45.800866 | 476 | 0.671078 |
7944ddc921bdc75ca843ca7b7bb1415a642c2d9c | 6,195 | py | Python | image_enhancement/Network.py | xiaxx244/shadow_pose_estimation | 74cc1090ef0e7f6573fb64ce1f50c50123a1b335 | [
"MIT"
] | 3 | 2020-11-25T14:34:01.000Z | 2020-11-29T10:11:53.000Z | image_enhancement/Network.py | xiaxx244/shadow_pose_estimation | 74cc1090ef0e7f6573fb64ce1f50c50123a1b335 | [
"MIT"
] | null | null | null | image_enhancement/Network.py | xiaxx244/shadow_pose_estimation | 74cc1090ef0e7f6573fb64ce1f50c50123a1b335 | [
"MIT"
] | null | null | null | import keras
import tensorflow as tf
from keras.layers import Input, Conv2D, Conv2DTranspose, Concatenate
from keras.applications.resnet50 import ResNet50
from keras.models import Model
from keras.utils import multi_gpu_model
from keras.layers import Activation, Dense,BatchNormalization
import sys, os
sys.path.append(os.path.abspath(os.path.join('../', 'models/')))
def build_resnet50(image_input):
image_input=Input(shape=image_input)
with tf.device('/cpu:0'):
resnet_model = ResNet50(input_tensor=image_input,include_top=False, weights='imagenet')
x = resnet_model.get_layer('res2a_branch2a').output
#x = Conv2D(16, (3,3), padding="same", activation="relu")(x)
resnet_model.trainable = False
with tf.device('/cpu:0'):
model=Model(inputs=resnet_model.input, outputs=x)
return multi_gpu_model(model,gpus=2)
def build_imh(input_shape):
def EM4(input,enhanced, channel):
reshape1=Activation("relu")(Conv2D(32,(3,3),padding="same",data_format="channels_last")(Concatenate(axis=3)([enhanced,input])))
#reshape=Activation("relu")(Conv2D(channel,(3,3),padding="same",data_format="channels_last")(input))
reshape=keras.layers.MaxPool2D(pool_size=(3,3),strides=1,padding="same")(reshape1)
#reshape2=Conv2D(channel,(3,3),activation="relu", padding="same",data_format="channels_last")(reshape)
conv_1=Conv2D(32,kernel_size=(3,3),strides=1, padding="same")(reshape)
conv_2=Activation("relu")(conv_1)
conv_2=Conv2D(filters=32,kernel_size=(3,3),strides=1, padding="same")(conv_2)
add_1=keras.layers.Add()([reshape, conv_2])#
res1=Activation("relu")(add_1)
#add_2=Concatenate(axis=3)([res1, input])
#add_1 = Activation('relu')(BatchNormalization(axis=3)(add_1))
#max_1=keras.layers.MaxPool2D(pool_size=(3,3),strides=1,padding="same")(res1)
conv_3=Conv2D(filters=32,kernel_size=(3,3),strides=1, padding="same")(res1)
conv_4=Activation("relu")(conv_3)
conv_4=Conv2D(filters=32,kernel_size=(3,3),strides=1, padding="same")(conv_4)
add_3=keras.layers.Add()([res1, conv_4])#
res2=Activation("relu")(add_3)
#add_4=Concatenate(axis=3)([res2,res1,enhanced, input])
#dense_1=denseblock(add_4)
#max_4=keras.layers.MaxPool2D(pool_size=(3,3),strides=1,padding="same")(add_4)
#max_4=Conv2D(32,(3,3),padding="same",data_format="channels_last")(max_4)
#add_2 = Activation('relu')(BatchNormalization(axis=3)(add_2))
conv_5=Conv2D(filters=64,kernel_size=(3,3),strides=1, padding="same")(res2)
conv_6=Activation("relu")(conv_5)
conv_6=Conv2D(filters=64,kernel_size=(3,3),strides=1, padding="same")(conv_6)
add_5=Concatenate(axis=3)([res2, res1, conv_6])#
res3=Activation("relu")(add_5)
conv_7=Conv2D(filters=64,kernel_size=(3,3),strides=1, padding="same")(res3)
conv_10=Conv2D(3,(3,3),padding="same",data_format="channels_last")(conv_7)
#res=keras.layers.Add()([conv_10, input])
return Model(inputs=input,outputs=conv_10),conv_10
def EM5(input,channel):
reshape1=Activation("relu")(Conv2D(32,(3,3),padding="same",data_format="channels_last")(input))
reshape=keras.layers.MaxPool2D(pool_size=(3,3),strides=1,padding="same")(reshape1)
#reshape2=Conv2D(channel,(3,3),activation="relu", padding="same",data_format="channels_last")(reshape)
conv_1=Conv2D(32,kernel_size=(3,3),strides=1, padding="same")(reshape)
conv_2=Activation("relu")(conv_1)
conv_2=Conv2D(filters=32,kernel_size=(3,3),strides=1, padding="same")(conv_2)
add_1=keras.layers.Add()([reshape, conv_2])#
res1=Activation("relu")(add_1)
#add_2=Concatenate(axis=3)([res1, input])
#add_1 = Activation('relu')(BatchNormalization(axis=3)(add_1))
#max_1=keras.layers.MaxPool2D(pool_size=(3,3),strides=1,padding="same")(res1)
conv_3=Conv2D(filters=32,kernel_size=(3,3),strides=1, padding="same")(res1)
conv_4=Activation("relu")(conv_3)
conv_4=Conv2D(filters=32,kernel_size=(3,3),strides=1, padding="same")(conv_4)
add_3=keras.layers.Add()([res1, conv_4])#
res2=Activation("relu")(add_3)
#add_4=Concatenate(axis=3)([res2,res1,input])
#dense_1=denseblock(add_4)
#max_4=keras.layers.MaxPool2D(pool_size=(3,3),strides=1,padding="same")(add_4)
#max_4=Conv2D(32,(3,3),padding="same",data_format="channels_last")(max_4)
#add_2 = Activation('relu')(BatchNormalization(axis=3)(add_2))
conv_5=Conv2D(filters=64,kernel_size=(3,3),strides=1, padding="same")(res2)
conv_6=Activation("relu")(conv_5)
conv_6=Conv2D(filters=64,kernel_size=(3,3),strides=1, padding="same")(conv_6)
add_5=Concatenate(axis=3)([res2, res1, conv_6])#
res3=Activation("relu")(add_5)
#add_6=Concatenate(axis=3)([res3,reshape])
conv_7=Conv2D(filters=64,kernel_size=(3,3),strides=1, padding="same")(res3)
#conv_8=Conv2D(filters=128,kernel_size=(3,3),strides=1, padding="same")(conv_7)
#conv_8=Conv2D(filters=16,kernel_size=(3,3),strides=1, padding="same")(conv_7)
#add_3 = Activation('relu')(BatchNormalization(axis=3)(add_3))
conv_10=Conv2D(3,(3,3),padding="same",data_format="channels_last")(conv_7)
#add_6=Concatenate(axis=3)([res3,add_4])
#max_5=keras.layers.MaxPool2D(pool_size=(3,3),strides=1,padding="same")(add_5)
#add_3 = Activation('relu')(BatchNormalization(axis=3)(add_3))
#conv_10=Conv2D(3,(3,3),padding="same",data_format="channels_last")(res3)
#res=keras.layers.Add()([conv_10, input])
return Model(inputs=input,outputs=conv_10),conv_10#
inputs=Input(shape=(256,256,3))
model_1,res_1 = EM5(inputs,16)
model_2,res_2 = EM4(inputs, res_1, 16)
model_3,res_3 = EM4(inputs, res_2, 16)
with tf.device('/cpu:0'):
model=Model(inputs,outputs=[res_1, res_2,res_3])
return multi_gpu_model(model,gpus=2)
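# Usage sketch (optimizer, losses and loss weights below are assumptions, not
# values taken from this repository):
# enhancement_model = build_imh((256, 256, 3))
# enhancement_model.compile(optimizer="adam",
#                           loss=["mae", "mae", "mae"],
#                           loss_weights=[0.2, 0.3, 0.5])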
| 45.551471 | 136 | 0.658757 |
7944ddc92ab32c47829a0e2481d8c7cdc699c23b | 3,569 | py | Python | model_search/generators/replay_generator.py | dywsjtu/model_search | 116c4f9016d8b89cf06d057dda020dae3371f211 | [
"Apache-2.0"
] | 3,315 | 2021-01-20T15:21:37.000Z | 2022-03-30T18:21:29.000Z | model_search/generators/replay_generator.py | dywsjtu/model_search | 116c4f9016d8b89cf06d057dda020dae3371f211 | [
"Apache-2.0"
] | 57 | 2021-01-19T20:51:03.000Z | 2022-03-24T11:04:07.000Z | model_search/generators/replay_generator.py | dywsjtu/model_search | 116c4f9016d8b89cf06d057dda020dae3371f211 | [
"Apache-2.0"
] | 380 | 2021-02-20T01:31:35.000Z | 2022-03-31T16:48:58.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Generate replay towers for the ensemble.
A replay here means a neural network tower(s) that is already trained, and we
know its performance in terms of loss.
This generator is to retrain an existing ensemble.
"""
import os
from model_search.architecture import architecture_utils
from model_search.generators import base_tower_generator
from model_search.generators import trial_utils
class ReplayGenerator(base_tower_generator.BaseTowerGenerator):
"""Generates prior towers for Replay in Phoenix."""
def __init__(self, phoenix_spec, metadata):
"""Initializes the object."""
super(ReplayGenerator, self).__init__(
phoenix_spec=phoenix_spec, metadata=metadata)
self._ensemble_spec = self._phoenix_spec.ensemble_spec
self._force_freeze = True
def generator_name(self):
return "replay_generator"
def first_time_chief_generate(self, features, input_layer_fn, trial_mode,
shared_input_tensor, shared_lengths,
logits_dimension, hparams, run_config,
is_training, trials):
"""Creates the prior for the ensemble."""
my_id = architecture_utils.DirectoryHandler.get_trial_id(
run_config.model_dir, self._phoenix_spec)
# Adaptive ensemble - build gradually, import last trial in replay.
if trial_utils.adaptive_or_residual_ensemble(self._phoenix_spec):
previous_model_dir = os.path.join(
os.path.dirname(run_config.model_dir), str(int(my_id) - 1))
return trial_utils.import_towers_one_trial(
features=features,
input_layer_fn=input_layer_fn,
phoenix_spec=self._phoenix_spec,
shared_input_tensor=shared_input_tensor,
shared_lengths=shared_lengths,
is_training=is_training,
logits_dimension=logits_dimension,
prev_model_dir=previous_model_dir,
force_freeze=self._force_freeze,
allow_auxiliary_head=self._allow_auxiliary_head,
caller_generator=self.generator_name(),
my_model_dir=run_config.model_dir)
# Non adaptive - import all towers after all are trained.
if trial_utils.non_adaptive_or_intermixed_ensemble(self._phoenix_spec):
previous_model_dirs = [
os.path.join(os.path.dirname(run_config.model_dir), str(i + 1))
for i in range(my_id - 1)
]
return trial_utils.import_towers_multiple_trials(
features=features,
input_layer_fn=input_layer_fn,
phoenix_spec=self._phoenix_spec,
shared_input_tensor=shared_input_tensor,
shared_lengths=shared_lengths,
is_training=is_training,
logits_dimension=logits_dimension,
previous_model_dirs=previous_model_dirs,
force_freeze=self._force_freeze,
allow_auxiliary_head=self._allow_auxiliary_head,
caller_generator=self.generator_name(),
my_model_dir=run_config.model_dir)
| 40.101124 | 77 | 0.719529 |
7944de40556337299dca6adc6df436de189a56f9 | 990 | py | Python | glowscript-fix_helix_canvas_error/GlowScriptOffline/Demos/BoxLightTest.py | Form-And-Function/galactic-collision | 42d8069ef6fe5065ee60277525b913640d225ddf | [
"MIT"
] | null | null | null | glowscript-fix_helix_canvas_error/GlowScriptOffline/Demos/BoxLightTest.py | Form-And-Function/galactic-collision | 42d8069ef6fe5065ee60277525b913640d225ddf | [
"MIT"
] | null | null | null | glowscript-fix_helix_canvas_error/GlowScriptOffline/Demos/BoxLightTest.py | Form-And-Function/galactic-collision | 42d8069ef6fe5065ee60277525b913640d225ddf | [
"MIT"
] | 1 | 2020-08-08T17:30:41.000Z | 2020-08-08T17:30:41.000Z | from vpython import *
#GlowScript 2.7 VPython
# Mikhail Temkine, University of Toronto, April 2007
scene.forward = vector(-0.4,-0.3,-1)
r = 3
a1 = a2 = a3 = 0.0
arrow(pos=vector(0, 4, 0), axis=vector(0, 1, 0), color=color.red)
boxy = box(size=vector(3,3,3), color=vector(0.5, 0.5, 0.5), texture=textures.rough)
b1 = sphere(radius=0.3, pos=vector(r, 0, 0), color=color.magenta, emissive=True)
b2 = sphere(radius=0.3, pos=vector(0, 0, r), color=color.yellow, emissive=True)
b3 = arrow(radius=0.3, pos=vector(0, 0, r), color=color.green, emissive=True)
l1 = local_light(pos=b1.pos, color=b1.color)
l2 = local_light(pos=b2.pos, color=b2.color)
l3 = distant_light(direction=b3.pos, color=b3.color)
while True:
rate(100)
l1.pos = b1.pos = r*vector(cos(a1), sin(a1), b1.pos.z)
a1 += 0.02
l2.pos = b2.pos = (r+0.4)*vector(b2.pos.x, sin(a2), cos(a2))
a2 += 0.055
l3.direction = b3.pos = (r+3)*vector(sin(a3), b3.pos.y, cos(a3))
b3.axis = b3.pos * -0.3
a3 += 0.033 | 36.666667 | 83 | 0.645455 |
7944de4c51a9a9cc771fa331c545095d56d3bc21 | 8,928 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/aio/operations/_network_interface_ip_configurations_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/aio/operations/_network_interface_ip_configurations_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/aio/operations/_network_interface_ip_configurations_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceIPConfigurationsOperations:
"""NetworkInterfaceIPConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceIPConfigurationListResult"]:
"""Get all ip configurations in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.NetworkInterfaceIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations'} # type: ignore
async def get(
self,
resource_group_name: str,
network_interface_name: str,
ip_configuration_name: str,
**kwargs
) -> "_models.NetworkInterfaceIPConfiguration":
"""Gets the specified network interface ip configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the ip configuration name.
:type ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.NetworkInterfaceIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}'} # type: ignore
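# Usage sketch (resource names are placeholders; `client` is an assumed
# azure.mgmt.network.aio.NetworkManagementClient instance):
# async def _list_ip_configs(client):
#     async for ip_config in client.network_interface_ip_configurations.list(
#         "example-rg", "example-nic"
#     ):
#         print(ip_config.name)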
| 49.877095 | 220 | 0.681228 |
7944deb1e77c352172165c8c6f361b1f3f58dba3 | 1,181 | py | Python | app_python/aula02.py | Doni-zete/Praticas-Python | 36a877a9f22f9992550fb6e3bdb89c751d6299ef | [
"MIT"
] | null | null | null | app_python/aula02.py | Doni-zete/Praticas-Python | 36a877a9f22f9992550fb6e3bdb89c751d6299ef | [
"MIT"
] | null | null | null | app_python/aula02.py | Doni-zete/Praticas-Python | 36a877a9f22f9992550fb6e3bdb89c751d6299ef | [
"MIT"
] | null | null | null | # a = int(input("First number: "))
# b = int(input("Second number: "))
# c = int(input("Third number: "))
# if a > b and a > c:
#     print("The largest number is {}".format(a))
# elif b > a and b > c:
#     print("The largest number is {}".format(b))
# else:
#     print("The largest number is {}".format(c))
# print("end of the program")
# a = int(input("Enter a value: "))
# b = int(input("Enter the second value: "))
# resto_a = a % 2
# resto_b = b % 2
# if resto_a == 0 or not resto_b > 0:
#     print("the number is even")
# else:
#     print("no even number entered")
nota1 = int(input("First grade: "))
if nota1 > 10:
    nota1 = int(input("You entered the first grade incorrectly"))
nota2 = int(input("Second grade: "))
if nota2 > 10:
    nota2 = int(input("You entered the second grade incorrectly"))
nota3 = int(input("Third grade: "))
if nota3 > 10:
    nota3 = int(input("You entered the third grade incorrectly"))
nota4 = int(input("Fourth grade: "))
if nota4 > 10:
    nota4 = int(input("You entered the fourth grade incorrectly"))
media = (nota1 + nota2 + nota3 + nota4) / 4
print("The student's grade is " + str(media))
# if nota1 <= 10 and nota2 <= 10 and nota3 <= 10 and nota4 <= 10:
# else:
#     print("Invalid grade entered") | 31.918919 | 61 | 0.629975 |
7944e02815cb5128f9013fb28ec044c1daa7a18c | 286 | py | Python | lunch/lunch/pages/views.py | willynpi/django-tutorial-for-programmers | 9301a5428f04148e3e40b3b57b90f36f7d198fdd | [
"CC-BY-3.0"
] | 300 | 2015-01-07T08:36:15.000Z | 2022-03-08T06:47:06.000Z | lunch/lunch/pages/views.py | willynpi/django-tutorial-for-programmers | 9301a5428f04148e3e40b3b57b90f36f7d198fdd | [
"CC-BY-3.0"
] | 24 | 2015-03-22T07:44:28.000Z | 2018-02-10T09:57:32.000Z | lunch/lunch/pages/views.py | willynpi/django-tutorial-for-programmers | 9301a5428f04148e3e40b3b57b90f36f7d198fdd | [
"CC-BY-3.0"
] | 129 | 2015-01-06T01:58:21.000Z | 2021-09-16T07:27:49.000Z | from django.shortcuts import render
from events.models import Event
def home(request):
try:
current_event = Event.objects.latest()
except Event.DoesNotExist:
current_event = None
return render(request, 'pages/home.html', {'current_event': current_event})
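# Note: Event.objects.latest() with no arguments relies on `get_latest_by` being
# set on the Event model's Meta (assumed here), e.g.:
#
# class Meta:
#     get_latest_by = 'starts_at'  # field name is an assumption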
| 23.833333 | 79 | 0.713287 |
7944e0d321aec95ec1a4233974001415dd8cc2d2 | 4,449 | py | Python | sapp/ui/filter_predicates.py | facebook/sapp | 4b85d10a791d8e9c8ae83d1f62fbded24845f053 | [
"MIT"
] | 74 | 2020-12-18T20:04:30.000Z | 2022-03-22T22:26:02.000Z | sapp/ui/filter_predicates.py | facebook/sapp | 4b85d10a791d8e9c8ae83d1f62fbded24845f053 | [
"MIT"
] | 61 | 2020-12-21T21:33:05.000Z | 2022-01-27T21:22:20.000Z | sapp/ui/filter_predicates.py | facebook/sapp | 4b85d10a791d8e9c8ae83d1f62fbded24845f053 | [
"MIT"
] | 20 | 2021-04-08T01:28:53.000Z | 2022-03-22T22:26:05.000Z | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import re
from abc import ABC, abstractmethod
from typing import (
TYPE_CHECKING,
Generic,
List,
Optional,
Pattern,
Sequence,
Set,
TypeVar,
Union,
)
from sqlalchemy import Column
from sqlalchemy.orm.query import Query
from sqlalchemy.sql.expression import or_
from typing_extensions import Final
from ..models import DBID
if TYPE_CHECKING:
from .issues import IssueQueryResult # noqa
_Q = TypeVar("_Q")
_T = TypeVar("_T")
class Predicate(ABC):
pass
class QueryPredicate(Predicate):
@abstractmethod
def apply(self, query: Query[_Q]) -> Query[_Q]:
...
class InRange(Generic[_T], QueryPredicate):
def __init__(
self,
column: Union[Column[_T], DBID],
*,
lower: Optional[_T] = None,
upper: Optional[_T] = None,
) -> None:
self._column = column
self._lower: Final[Optional[_T]] = lower
self._upper: Final[Optional[_T]] = upper
def apply(self, query: Query[_Q]) -> Query[_Q]:
if self._lower is not None:
query = query.filter(self._column >= self._lower)
if self._upper is not None:
query = query.filter(self._column <= self._upper)
return query
class Equals(Generic[_T], QueryPredicate):
def __init__(self, column: Union[Column[_T], DBID], to: _T) -> None:
self._column = column
self._to: Final[Optional[_T]] = to
def apply(self, query: Query[_Q]) -> Query[_Q]:
return query.filter(self._column == self._to)
class IsNull(Generic[_T], QueryPredicate):
def __init__(self, column: Union[Column[_T], DBID]) -> None:
self._column = column
    def apply(self, query: Query[_Q]) -> Query[_Q]:
        # Build a SQL "IS NULL" expression; a bare `is None` comparison would
        # evaluate to a constant Python bool instead of a filter clause.
        return query.filter(self._column.is_(None))
class Like(Generic[_T], QueryPredicate):
def __init__(self, column: Union[Column[_T], DBID], items: Sequence[_T]) -> None:
self._column = column
self._items = items
def apply(self, query: Query[_Q]) -> Query[_Q]:
# pyre-ignore: SQLAlchemy too dynamic.
return query.filter(or_(*[self._column.like(item) for item in self._items]))
class IssuePredicate(Predicate):
@abstractmethod
def apply(self, issues: List[IssueQueryResult]) -> List[IssueQueryResult]:
...
class HasAll(IssuePredicate):
def __init__(self, features: Set[str]) -> None:
self._features = features
def apply(self, issues: List[IssueQueryResult]) -> List[IssueQueryResult]:
return [
issue
for issue in issues
if issue.features & self._features == self._features
]
class Matches(IssuePredicate):
def __init__(self, regex: str, parameter_name: str) -> None:
self._regex: Pattern[str] = re.compile(regex)
self._parameter_name = parameter_name
def attribute_set(self, issue: IssueQueryResult) -> Set[str]:
attribute = issue._asdict()[self._parameter_name]
if isinstance(attribute, str):
return {attribute}
return set(attribute)
def apply(self, issues: List[IssueQueryResult]) -> List[IssueQueryResult]:
return [
issue
for issue in issues
if any(map(self._regex.match, self.attribute_set(issue)))
]
class HasAny(IssuePredicate):
def __init__(self, parameter_list: Set[str], parameter_name: str) -> None:
self._parameter_list = parameter_list
self._parameter_name = parameter_name
def attribute_set(self, issue: IssueQueryResult) -> Set[str]:
attribute = issue._asdict()[self._parameter_name]
if isinstance(attribute, str):
return {attribute}
return set(attribute)
def apply(self, issues: List[IssueQueryResult]) -> List[IssueQueryResult]:
return [
issue
for issue in issues
if not self.attribute_set(issue).isdisjoint(self._parameter_list)
]
class HasNone(IssuePredicate):
def __init__(self, features: Set[str]) -> None:
self._features = features
def apply(self, issues: List[IssueQueryResult]) -> List[IssueQueryResult]:
return [issue for issue in issues if len(issue.features & self._features) == 0]
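# Usage sketch: QueryPredicates narrow the SQLAlchemy query via `.apply(query)`,
# while IssuePredicates filter the already-materialized results. The feature
# names below are illustrative assumptions.
def _example_issue_filter(
    issues: List["IssueQueryResult"],
) -> List["IssueQueryResult"]:
    issues = HasAll({"via:tito"}).apply(issues)
    return HasAny({"via:shim", "via:format-string"}, "features").apply(issues)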
| 28.703226 | 87 | 0.646662 |
7944e1ac9dbe2368a6c1c0b2402f1e1281d915a1 | 8,753 | py | Python | supervisor/supervisor.py | dkaukov/supervisor | 4beaf571c28ec1dd59011a8e5679ae3b297eb98b | [
"Apache-2.0"
] | 1 | 2021-09-22T00:15:17.000Z | 2021-09-22T00:15:17.000Z | supervisor/supervisor.py | BigElkHunter/cyberockit | fa7140fd9a5ee1316d103628f1f7f4c6db05b158 | [
"Apache-2.0"
] | 62 | 2021-09-02T06:16:18.000Z | 2022-03-31T06:19:11.000Z | supervisor/supervisor.py | BigElkHunter/cyberockit | fa7140fd9a5ee1316d103628f1f7f4c6db05b158 | [
"Apache-2.0"
] | 2 | 2021-09-22T00:13:58.000Z | 2021-09-22T15:06:27.000Z | """Home Assistant control object."""
import asyncio
from contextlib import suppress
from ipaddress import IPv4Address
import logging
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Awaitable, Optional
import aiohttp
from aiohttp.client_exceptions import ClientError
from awesomeversion import AwesomeVersion, AwesomeVersionException
from .const import ATTR_SUPERVISOR_INTERNET, SUPERVISOR_VERSION, URL_HASSIO_APPARMOR
from .coresys import CoreSys, CoreSysAttributes
from .docker.stats import DockerStats
from .docker.supervisor import DockerSupervisor
from .exceptions import (
CodeNotaryError,
CodeNotaryUntrusted,
DockerError,
HostAppArmorError,
SupervisorAppArmorError,
SupervisorError,
SupervisorJobError,
SupervisorUpdateError,
)
from .jobs.decorator import Job, JobCondition
from .resolution.const import ContextType, IssueType
from .utils.codenotary import calc_checksum
_LOGGER: logging.Logger = logging.getLogger(__name__)
class Supervisor(CoreSysAttributes):
"""Home Assistant core object for handle it."""
def __init__(self, coresys: CoreSys):
"""Initialize hass object."""
self.coresys: CoreSys = coresys
self.instance: DockerSupervisor = DockerSupervisor(coresys)
self._connectivity: bool = True
async def load(self) -> None:
"""Prepare Home Assistant object."""
try:
await self.instance.attach(version=self.version)
except DockerError:
_LOGGER.critical("Can't setup Supervisor Docker container!")
with suppress(DockerError):
await self.instance.cleanup(old_image=self.sys_config.image)
@property
def connectivity(self) -> bool:
"""Return true if we are connected to the internet."""
return self._connectivity
@connectivity.setter
def connectivity(self, state: bool) -> None:
"""Set supervisor connectivity state."""
if self._connectivity == state:
return
self._connectivity = state
self.sys_homeassistant.websocket.supervisor_update_event(
"network", {ATTR_SUPERVISOR_INTERNET: state}
)
@property
def ip_address(self) -> IPv4Address:
"""Return IP of Supervisor instance."""
return self.instance.ip_address
@property
def need_update(self) -> bool:
"""Return True if an update is available."""
if self.sys_dev:
return False
try:
return self.version < self.latest_version
except (AwesomeVersionException, TypeError):
return False
@property
def version(self) -> AwesomeVersion:
"""Return version of running Home Assistant."""
return AwesomeVersion(SUPERVISOR_VERSION)
@property
def latest_version(self) -> AwesomeVersion:
"""Return last available version of Home Assistant."""
return self.sys_updater.version_supervisor
@property
def image(self) -> str:
"""Return image name of Home Assistant container."""
return self.instance.image
@property
def arch(self) -> str:
"""Return arch of the Supervisor container."""
return self.instance.arch
async def update_apparmor(self) -> None:
"""Fetch last version and update profile."""
url = URL_HASSIO_APPARMOR
# Fetch
try:
_LOGGER.info("Fetching AppArmor profile %s", url)
timeout = aiohttp.ClientTimeout(total=10)
async with self.sys_websession.get(url, timeout=timeout) as request:
if request.status != 200:
raise SupervisorAppArmorError(
f"Fetching AppArmor Profile from {url} response with {request.status}",
_LOGGER.error,
)
data = await request.text()
except (aiohttp.ClientError, asyncio.TimeoutError) as err:
self.sys_supervisor.connectivity = False
raise SupervisorAppArmorError(
f"Can't fetch AppArmor profile {url}: {str(err) or 'Timeout'}",
_LOGGER.error,
) from err
# Validate
try:
await self.sys_security.verify_own_content(checksum=calc_checksum(data))
except CodeNotaryUntrusted as err:
raise SupervisorAppArmorError(
"Content-Trust is broken for the AppArmor profile fetch!",
_LOGGER.critical,
) from err
except CodeNotaryError as err:
raise SupervisorAppArmorError(
f"CodeNotary error while processing AppArmor fetch: {err!s}",
_LOGGER.error,
) from err
# Load
with TemporaryDirectory(dir=self.sys_config.path_tmp) as tmp_dir:
profile_file = Path(tmp_dir, "apparmor.txt")
try:
profile_file.write_text(data)
except OSError as err:
raise SupervisorAppArmorError(
f"Can't write temporary profile: {err!s}", _LOGGER.error
) from err
try:
await self.sys_host.apparmor.load_profile(
"hassio-supervisor", profile_file
)
except HostAppArmorError as err:
raise SupervisorAppArmorError(
"Can't update AppArmor profile!", _LOGGER.error
) from err
async def update(self, version: Optional[AwesomeVersion] = None) -> None:
"""Update Home Assistant version."""
version = version or self.latest_version
if version == self.sys_supervisor.version:
raise SupervisorUpdateError(
f"Version {version!s} is already installed", _LOGGER.warning
)
# First update own AppArmor
try:
await self.update_apparmor()
except SupervisorAppArmorError as err:
raise SupervisorUpdateError(
f"Abort update because of an issue with AppArmor: {err!s}",
_LOGGER.critical,
) from err
# Update container
_LOGGER.info("Update Supervisor to version %s", version)
try:
await self.instance.install(
version, image=self.sys_updater.image_supervisor
)
await self.instance.update_start_tag(
self.sys_updater.image_supervisor, version
)
except DockerError as err:
self.sys_resolution.create_issue(
IssueType.UPDATE_FAILED, ContextType.SUPERVISOR
)
self.sys_capture_exception(err)
raise SupervisorUpdateError(
f"Update of Supervisor failed: {err!s}", _LOGGER.error
) from err
else:
self.sys_config.version = version
self.sys_config.image = self.sys_updater.image_supervisor
self.sys_config.save_data()
self.sys_create_task(self.sys_core.stop())
@Job(conditions=[JobCondition.RUNNING], on_condition=SupervisorJobError)
async def restart(self) -> None:
"""Restart Supervisor soft."""
self.sys_core.exit_code = 100
self.sys_create_task(self.sys_core.stop())
@property
def in_progress(self) -> bool:
"""Return True if a task is in progress."""
return self.instance.in_progress
def logs(self) -> Awaitable[bytes]:
"""Get Supervisor docker logs.
Return Coroutine.
"""
return self.instance.logs()
def check_trust(self) -> Awaitable[None]:
"""Calculate Supervisor docker content trust.
Return Coroutine.
"""
return self.instance.check_trust()
async def stats(self) -> DockerStats:
"""Return stats of Supervisor."""
try:
return await self.instance.stats()
except DockerError as err:
raise SupervisorError() from err
async def repair(self):
"""Repair local Supervisor data."""
if await self.instance.exists():
return
_LOGGER.info("Repairing Supervisor %s", self.version)
try:
await self.instance.retag()
except DockerError:
_LOGGER.error("Repair of Supervisor failed")
async def check_connectivity(self):
"""Check the connection."""
timeout = aiohttp.ClientTimeout(total=10)
try:
await self.sys_websession.head(
"https://version.home-assistant.io/online.txt", timeout=timeout
)
except (ClientError, asyncio.TimeoutError):
self.connectivity = False
else:
self.connectivity = True
| 34.058366 | 95 | 0.621844 |
7944e1df742cd6e15996bb86f2ddd01c878a394e | 783 | py | Python | wildlifecompliance/migrations/0165_auto_20190503_1352.py | preranaandure/wildlifecompliance | bc19575f7bccf7e19adadbbaf5d3eda1d1aee4b5 | [
"Apache-2.0"
] | 1 | 2020-12-07T17:12:40.000Z | 2020-12-07T17:12:40.000Z | wildlifecompliance/migrations/0165_auto_20190503_1352.py | preranaandure/wildlifecompliance | bc19575f7bccf7e19adadbbaf5d3eda1d1aee4b5 | [
"Apache-2.0"
] | 14 | 2020-01-08T08:08:26.000Z | 2021-03-19T22:59:46.000Z | wildlifecompliance/migrations/0165_auto_20190503_1352.py | preranaandure/wildlifecompliance | bc19575f7bccf7e19adadbbaf5d3eda1d1aee4b5 | [
"Apache-2.0"
] | 15 | 2020-01-08T08:02:28.000Z | 2021-11-03T06:48:32.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-05-03 05:52
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('wildlifecompliance', '0164_merge_20190502_1153'),
]
operations = [
migrations.RemoveField(
model_name='assessment',
name='activity',
),
migrations.AddField(
model_name='assessment',
name='actioned_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| 27.964286 | 121 | 0.662835 |
7944e21f05f33fdee45d157647a1de599bf460a7 | 145 | py | Python | bin/django-admin.py | pabulumm/neighbors | 59f3f3ae727fe52c7897beaf73d157b02cdcb7a3 | [
"BSD-3-Clause"
] | null | null | null | bin/django-admin.py | pabulumm/neighbors | 59f3f3ae727fe52c7897beaf73d157b02cdcb7a3 | [
"BSD-3-Clause"
] | null | null | null | bin/django-admin.py | pabulumm/neighbors | 59f3f3ae727fe52c7897beaf73d157b02cdcb7a3 | [
"BSD-3-Clause"
] | null | null | null | #!/home/tuck/web/neighbors/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 24.166667 | 42 | 0.77931 |
7944e22cf49ca0276d6596fe329c774880857f74 | 7,513 | py | Python | code_artyom/resnet50_classifier_01.py | artyompal/kaggle_salt | 3c323755730745ac7bbfd106f1f20919cceef0ee | [
"MIT"
] | null | null | null | code_artyom/resnet50_classifier_01.py | artyompal/kaggle_salt | 3c323755730745ac7bbfd106f1f20919cceef0ee | [
"MIT"
] | 1 | 2021-03-25T23:31:26.000Z | 2021-03-25T23:31:28.000Z | code_artyom/resnet50_classifier_01.py | artyompal/kaggle_salt | 3c323755730745ac7bbfd106f1f20919cceef0ee | [
"MIT"
] | 1 | 2018-11-08T09:30:38.000Z | 2018-11-08T09:30:38.000Z | #!/usr/bin/python3.6
# Input data files are available in the "../data/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
import sys
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import albumentations as albu
import keras
from keras.applications.resnet50 import ResNet50
from keras.preprocessing.image import load_img
from keras.models import Model, load_model, save_model
from keras.layers import Input,Dropout,BatchNormalization,Activation,Add, Dense, Flatten
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tqdm import tqdm
from sklearn.model_selection import StratifiedKFold
from keras.utils import to_categorical
from keras import optimizers
from keras.applications.imagenet_utils import preprocess_input
from sklearn.utils.class_weight import compute_class_weight
from segmentation_models import Unet
from segmentation_models.backbones import get_preprocessing
SEED = 42
VERSION = 2
BATCH_SIZE = 32
NUM_FOLDS = 5
image_size = 197
# Loading of training/testing ids and depths
train_df = pd.read_csv("../data/train.csv", index_col="id", usecols=[0])
depths_df = pd.read_csv("../data/depths.csv", index_col="id")
train_df = train_df.join(depths_df)
test_df = pd.DataFrame(index=depths_df[~depths_df.index.isin(train_df.index)].index)
test_df = test_df.join(depths_df)
len(train_df)
train_df["images"] = [np.array(load_img("../data/train/images/{}.png".format(idx), interpolation='nearest',
target_size=(image_size, image_size),
color_mode = "grayscale",)) for idx in tqdm(train_df.index)]
train_df["masks"] = [np.array(load_img("../data/train/masks/{}.png".format(idx), interpolation='nearest',
target_size=(image_size, image_size),
color_mode = "grayscale",)) for idx in tqdm(train_df.index)]
test_df["images"] = [np.array(load_img("../data/test/images/{}.png".format(idx), interpolation='nearest',
target_size=(image_size, image_size),
color_mode = "grayscale")) for idx in tqdm(test_df.index)]
train_df["coverage"] = train_df.masks.map(np.sum) / pow(image_size, 2) / 255
def cov_to_class(val):
for i in range(0, 11):
if val * 10 <= i :
return i
train_df["coverage_class"] = train_df.coverage.map(cov_to_class)
def get_class(img, th=10):
img_sum = np.array([i.sum() for i in img])
return np.array(img_sum>th).astype(int)
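# Usage sketch (illustrative values, not from the original notebook): get_class
# labels each mask in a batch as 1 when its pixel sum exceeds `th`, e.g.
#   get_class(np.array([np.zeros((2, 2)), np.ones((2, 2)) * 255]))  # -> array([0, 1])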
def add_depth_coord(images):
""" Takes dataset (N, W, H, 1) returns (N, W, H, 3). """
assert(len(images.shape) == 4)
channel1 = np.zeros_like(images)
h = images.shape[1]
for row, const in enumerate(np.linspace(0, 1, h)):
channel1[:, row, ...] = const
channel2 = images * channel1
images = np.concatenate([images, channel1, channel2], axis=-1)
return images
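# Shape sketch (restating the docstring with this notebook's sizes): for x of
# shape (N, 197, 197, 1), add_depth_coord(x) returns (N, 197, 197, 3), where the
# second channel is a per-row gradient from 0 to 1 and the third is image * gradient.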
x_train = np.array(train_df.images.tolist()).reshape(-1, image_size, image_size, 1)
y_train = np.array(train_df.masks.tolist()).reshape(-1, image_size, image_size, 1)
# x_test = np.array(test_df.images.tolist()).reshape(-1, image_size, image_size, 1)
train_cls = np.array(train_df.coverage_class)
class Datagen(keras.utils.Sequence):
""" Returns batchs of images which are augmented and resized. """
def __init__(self, x, y, valid):
assert(x.shape[0] == y.shape[0])
self.x = x
self.y = y
self.valid = valid
self.preprocessing_fn = get_preprocessing('resnet50')
SZ = image_size
self.augs = albu.Compose([
# albu.OneOf([albu.RandomSizedCrop(min_max_height=(SZ//2, SZ), height=SZ, width=SZ, p=0.5),
# albu.PadIfNeeded(min_height=SZ, min_width=SZ, p=0.5)], p=1),
# albu.VerticalFlip(p=0.5),
# albu.HorizontalFlip(p=0.5),
# albu.RandomRotate90(p=0.5),
albu.Rotate(p=0.5, limit=10),
albu.OneOf([
albu.ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
albu.GridDistortion(p=0.5),
albu.OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
], p=0.8),
# albu.CLAHE(p=0.8),
# albu.RandomContrast(p=0.8),
albu.RandomBrightness(p=0.8),
albu.RandomGamma(p=0.8)])
print("created Datagen: x", x.shape, "y", y.shape)
def __getitem__(self, idx):
assert(idx < len(self))
x = self.x[idx * BATCH_SIZE : (idx + 1) * BATCH_SIZE]
y = self.y[idx * BATCH_SIZE : (idx + 1) * BATCH_SIZE]
if not self.valid:
xa = []
for image in x :
augmented = self.augs(image=image)
xa.append(augmented["image"].reshape(image_size, image_size, 1))
x = np.array(xa).reshape(-1, image_size, image_size, 1)
x = add_depth_coord(x)
return self.preprocessing_fn(x), y
def __len__(self):
return int(np.ceil(self.x.shape[0] / BATCH_SIZE))
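# Minimal consumption sketch (illustrative, names assumed from the fold loop below):
#   gen = Datagen(x_tr, y_tr, valid=False)
#   xb, yb = gen[0]  # one ResNet50-preprocessed batch of (197, 197, 3) tiles
# len(gen) is the number of batches per epoch; Keras pulls batches by index.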
folds = StratifiedKFold(NUM_FOLDS, shuffle=True, random_state=666)
for fold, indices in enumerate(folds.split(x_train, train_df.coverage_class)):
print("==================== fold %d" % fold)
train_idx, valid_idx = indices
x_tr, y_tr = x_train[train_idx], y_train[train_idx]
x_val, y_val = x_train[valid_idx], y_train[valid_idx]
#Data augmentation
x_tr = np.append(x_tr, [np.fliplr(x) for x in x_tr], axis=0)
y_tr = get_class(np.append(y_tr, [np.fliplr(x) for x in y_tr], axis=0)).flatten()
y_val = get_class(y_val).flatten()
resnet_model = ResNet50(input_shape=(image_size, image_size, 3), weights='imagenet', include_top=False)
input_x = resnet_model.input
output_layer = Flatten()(resnet_model.output)
output_layer = Dense(1, activation='sigmoid')(output_layer)
model = Model(input_x, output_layer)
learning_rate = 0.001
c = optimizers.adam(lr = learning_rate)
model.compile(optimizer=c, loss='binary_crossentropy', metrics=['accuracy'])
save_model_name = '../output/resnet50_class_v%d_fold%d_acc{val_acc:.02f}_epoch{epoch:02d}.model' % (VERSION, fold)
early_stopping = EarlyStopping(monitor='val_acc', mode = 'max', patience=10, verbose=1)
model_checkpoint = ModelCheckpoint(save_model_name, monitor='val_acc',
mode = 'max', save_best_only=True, verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_acc', mode = 'max', factor=0.5, patience=5, min_lr=0.0001, verbose=1)
epochs = 400
batch_size = 32
cw = compute_class_weight("balanced", np.unique(y_tr), y_tr)
print("class_weight: ", cw)
history = model.fit_generator(Datagen(x_tr, y_tr, valid=False),
validation_data=Datagen(x_val, y_val, valid=True),
epochs=epochs, callbacks=[early_stopping, model_checkpoint, reduce_lr],
use_multiprocessing=True, workers=12,
shuffle=False, verbose=1,
class_weight=cw)
# classes_df = pd.DataFrame(index=test_df.index)
# classes_df['class'] = model.predict(preprocess_input(add_depth_coord(x_test)))
# test_df.to_csv(f'../output/resnet50_class_v{VERSION}.csv', index=True)
| 39.751323 | 118 | 0.64435 |
7944e2b82d57ded30850d83544449ea3b788164e | 6,800 | py | Python | userbot/modules/vctools.py | RyuuXS/trans | 89d5e0ab2c6c0ec68f4c0cabf5574d67f1e2a7fe | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2022-02-03T02:26:45.000Z | 2022-03-28T07:24:42.000Z | userbot/modules/vctools.py | RyuuXS/trans | 89d5e0ab2c6c0ec68f4c0cabf5574d67f1e2a7fe | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2022-02-07T13:21:21.000Z | 2022-02-07T13:21:21.000Z | userbot/modules/vctools.py | RyuuXS/trans | 89d5e0ab2c6c0ec68f4c0cabf5574d67f1e2a7fe | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 3 | 2022-02-02T03:22:09.000Z | 2022-03-22T09:07:30.000Z | # Copyright (C) 2021 TeamUltroid
#
# This file is a part of < https://github.com/TeamUltroid/Ultroid/ >
# PLease read the GNU Affero General Public License in
# <https://www.github.com/TeamUltroid/Ultroid/blob/main/LICENSE/>.
#
# Ported by @mrismanaziz
# FROM Man-Userbot <https://github.com/mrismanaziz/Man-Userbot>
# t.me/SharingUserbot & t.me/Lunatic0de
#
# If you copy-paste this, do not remove the credits
from pytgcalls import StreamType
from pytgcalls.exceptions import AlreadyJoinedError
from pytgcalls.types.input_stream import InputAudioStream, InputStream
from telethon.tl.functions.channels import GetFullChannelRequest as getchat
from telethon.tl.functions.phone import CreateGroupCallRequest as startvc
from telethon.tl.functions.phone import DiscardGroupCallRequest as stopvc
from telethon.tl.functions.phone import EditGroupCallTitleRequest as settitle
from telethon.tl.functions.phone import GetGroupCallRequest as getvc
from telethon.tl.functions.phone import InviteToGroupCallRequest as invitetovc
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP, call_py
from userbot.events import register
from userbot.utils import edit_delete, edit_or_reply, trans_cmd
async def get_call(event):
mm = await event.client(getchat(event.chat_id))
xx = await event.client(getvc(mm.full_chat.call, limit=1))
return xx.call
def user_list(l, n):
for i in range(0, len(l), n):
yield l[i : i + n]
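# Chunking sketch: user_list yields n-sized slices of a list, e.g.
#   list(user_list([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4], [5]]
# vcinvite below uses it to invite voice-chat members six at a time.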
@trans_cmd(pattern="startvc$")
@register(pattern=r"^\.startvcs$", sudo=True)
async def start_voice(c):
me = await c.client.get_me()
chat = await c.get_chat()
admin = chat.admin_rights
creator = chat.creator
if not admin and not creator:
await edit_delete(c, f"**Maaf {me.first_name} Bukan Admin 👮**")
return
try:
await c.client(startvc(c.chat_id))
await edit_or_reply(c, "`Voice Chat Started...`")
except Exception as ex:
await edit_delete(c, f"**ERROR:** `{ex}`")
@trans_cmd(pattern="stopvc$")
@register(pattern=r"^\.stopvcs$", sudo=True)
async def stop_voice(c):
me = await c.client.get_me()
chat = await c.get_chat()
admin = chat.admin_rights
creator = chat.creator
if not admin and not creator:
await edit_delete(c, f"**Maaf {me.first_name} Bukan Admin 👮**")
return
try:
await c.client(stopvc(await get_call(c)))
await edit_or_reply(c, "`Voice Chat Stopped...`")
except Exception as ex:
await edit_delete(c, f"**ERROR:** `{ex}`")
@trans_cmd(pattern="vcinvite")
async def _(c):
xxnx = await edit_or_reply(c, "`Inviting Members to Voice Chat...`")
users = []
z = 0
async for x in c.client.iter_participants(c.chat_id):
if not x.bot:
users.append(x.id)
botman = list(user_list(users, 6))
for p in botman:
try:
await c.client(invitetovc(call=await get_call(c), users=p))
z += 6
except BaseException:
pass
await xxnx.edit(f"`{z}` **Orang Berhasil diundang ke VCG**")
@trans_cmd(pattern="vctitle(?: |$)(.*)")
@register(pattern=r"^\.cvctitle$", sudo=True)
async def change_title(e):
title = e.pattern_match.group(1)
me = await e.client.get_me()
chat = await e.get_chat()
admin = chat.admin_rights
creator = chat.creator
if not title:
return await edit_delete(e, "**Silahkan Masukan Title Obrolan Suara Grup**")
if not admin and not creator:
await edit_delete(e, f"**Maaf {me.first_name} Bukan Admin 👮**")
return
try:
await e.client(settitle(call=await get_call(e), title=title.strip()))
await edit_or_reply(e, f"**Berhasil Mengubah Judul VCG Menjadi** `{title}`")
except Exception as ex:
await edit_delete(e, f"**ERROR:** `{ex}`")
@trans_cmd(pattern="joinvc(?: |$)(.*)")
@register(pattern=r"^\.joinvcs(?: |$)(.*)", sudo=True)
async def _(event):
Man = await edit_or_reply(event, "`Processing...`")
if len(event.text.split()) > 1:
chat_id = event.text.split()[1]
try:
chat_id = await event.client.get_peer_id(int(chat_id))
except Exception as e:
return await Man.edit(f"**ERROR:** `{e}`")
else:
chat_id = event.chat_id
file = "./userbot/resources/audio-man.mp3"
if chat_id:
try:
await call_py.join_group_call(
chat_id,
InputStream(
InputAudioStream(
file,
),
),
stream_type=StreamType().local_stream,
)
await Man.edit(
f"⌛️**Berhasil Join Ke Obrolan Suara 🥵**\n└ **Chat ID:** `{chat_id}`"
)
except AlreadyJoinedError:
await call_py.leave_group_call(chat_id)
await edit_delete(
Man,
"**ERROR:** `Karena akun sedang berada di obrolan suara`\n\n• Silahkan coba `.joinvc` lagi",
45,
)
except Exception as e:
await Man.edit(f"**INFO:** `{e}`")
@trans_cmd(pattern="leavevc(?: |$)(.*)")
@register(pattern=r"^\.leavevcs(?: |$)(.*)", sudo=True)
async def vc_end(event):
Man = await edit_or_reply(event, "`Processing...`")
if len(event.text.split()) > 1:
chat_id = event.text.split()[1]
try:
chat_id = await event.client.get_peer_id(int(chat_id))
except Exception as e:
return await Man.edit(f"**ERROR:** `{e}`")
else:
chat_id = event.chat_id
if chat_id:
try:
await call_py.leave_group_call(chat_id)
await edit_delete(
Man,
f"`Gimana sih Tadi suruh naik, Sekarang malah suruh turun😒.`\n└ **Chat ID:** `{chat_id}`",
)
except Exception as e:
await Man.edit(f"**INFO:** `{e}`")
CMD_HELP.update(
{
"vctools": f"**Plugin : **`vctools`\
\n\n • **Syntax :** `{cmd}startvc`\
\n • **Function : **Untuk Memulai voice chat group\
\n\n • **Syntax :** `{cmd}stopvc`\
\n • **Function : **Untuk Memberhentikan voice chat group\
\n\n • **Syntax :** `{cmd}joinvc` atau `{cmd}joinvc` <chatid/username gc>\
\n • **Function : **Untuk Bergabung ke voice chat group\
\n\n • **Syntax :** `{cmd}leavevc` atau `{cmd}leavevc` <chatid/username gc>\
\n • **Function : **Untuk Turun dari voice chat group\
\n\n • **Syntax :** `{cmd}vctitle` <title vcg>\
\n • **Function : **Untuk Mengubah title/judul voice chat group\
\n\n • **Syntax :** `{cmd}vcinvite`\
\n • **Function : **Mengundang Member group ke voice chat group (anda harus sambil bergabung ke OS/VCG)\
"
}
)
| 34.871795 | 114 | 0.609559 |
7944e35d8abf604e7f09702fc50c7e62cc4d009c | 49,372 | py | Python | django/db/models/sql/compiler.py | doismellburning/django | 039465a6a7a18f48ea77ceadb6949990c0ec92e1 | [
"BSD-3-Clause"
] | null | null | null | django/db/models/sql/compiler.py | doismellburning/django | 039465a6a7a18f48ea77ceadb6949990c0ec92e1 | [
"BSD-3-Clause"
] | null | null | null | django/db/models/sql/compiler.py | doismellburning/django | 039465a6a7a18f48ea77ceadb6949990c0ec92e1 | [
"BSD-3-Clause"
] | null | null | null | from itertools import chain
import re
import warnings
from django.core.exceptions import FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import OrderBy, Random, RawSQL, Ref
from django.db.models.query_utils import select_related_descend, QueryWrapper
from django.db.models.sql.constants import (CURSOR, SINGLE, MULTI, NO_RESULTS,
ORDER_DIR, GET_ITERATOR_CHUNK_SIZE)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.query import get_order_dir, Query
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.six.moves import zip
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
# The select, klass_info, and annotations are needed by QuerySet.iterator()
# these are set as a side-effect of executing the query. Note that we calculate
# separately a list of extra select columns needed for grammatical correctness
# of the query, but these columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self.ordering_parts = re.compile(r'(.*)\s(ASC|DESC)(.*)')
def setup_query(self):
if all(self.query.alias_refcount[a] == 0 for a in self.query.tables):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select()
self.col_count = len(self.select)
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query()
order_by = self.get_order_by()
extra_select = self.get_extra_select(order_by, self.select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Returns a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't be ever restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
# refer some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, 'as_sql'):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
for expr, _, _ in select:
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
for expr, (sql, params, is_ref) in order_by:
if expr.contains_aggregate:
continue
# We can skip References to select clause, as all expressions in
# the select clause are already part of the group by.
if is_ref:
continue
expressions.extend(expr.get_source_expressions())
having = self.query.having.get_group_by_cols()
for expr in having:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having)
for expr in expressions:
sql, params = self.compile(expr)
if (sql, tuple(params)) not in seen:
result.append((sql, params))
seen.add((sql, tuple(params)))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key. Currently only the MySQL form is
# implemented.
# MySQLism: however, columns in HAVING clause must be added to the
# GROUP BY.
if self.connection.features.allows_group_by_pk:
# The logic here is: if the main model's primary key is in the
# query, then set new_expressions to that field. If that happens,
# then also add having expressions to group by.
pk = None
for expr in expressions:
if (expr.output_field.primary_key and
getattr(expr.output_field, 'model') == self.query.model):
pk = expr
if pk:
expressions = [pk] + [expr for expr in expressions if expr in having]
return expressions
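    # Illustration (not part of the original source): when the backend sets
    # allows_group_by_pk and the main model's primary key is among the grouped
    # expressions, the whole list collapses to that primary key plus any HAVING
    # columns, the MySQL shorthand described above; other backends keep the
    # full expression list.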
def get_select(self):
"""
Returns three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- Which model to instantiate
- Which columns for that model are present in the query (by
position of the select clause).
        - related_klass_infos: [f, klass_info] to descend into
        The annotations value is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
select_list = []
for c in self.get_default_columns():
select_list.append(select_idx)
select.append((c, None))
select_idx += 1
klass_info = {
'model': self.query.model,
'select_fields': select_list,
}
# self.query.select is a special case. These columns never go to
# any model.
for col in self.query.select:
select.append((col, None))
select_idx += 1
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info['related_klass_infos'] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info['related_klass_infos']:
if ki['from_parent']:
ki['select_fields'] = (klass_info['select_fields'] +
ki['select_fields'])
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for col, alias in select:
ret.append((col, self.compile(col, select_format=True), alias))
return ret, klass_info, annotations
def get_order_by(self):
"""
Returns a list of 2-tuples of form (expr, (sql, params)) for the
ORDER BY clause.
The order_by clause can alter the select clause (for example it
can add aliases to clauses that do not yet have one, or it can
add totally new select clauses).
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by or self.query.get_meta().ordering or [])
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
order_by = []
for pos, field in enumerate(ordering):
if hasattr(field, 'resolve_expression'):
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field.reverse_ordering()
order_by.append((field, False))
continue
if field == '?': # random
order_by.append((OrderBy(Random()), False))
continue
col, order = get_order_dir(field, asc)
descending = True if order == 'DESC' else False
if col in self.query.annotation_select:
order_by.append((
OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending),
True))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
order_by.append((
OrderBy(
RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []),
descending=descending
), False))
continue
if not self.query._extra or col not in self.query._extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
order_by.extend(self.find_ordering_name(
field, self.query.get_meta(), default_order=asc))
else:
if col not in self.query.extra_select:
order_by.append((
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False))
else:
order_by.append((
OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending),
True))
result = []
seen = set()
for expr, is_ref in order_by:
resolved = expr.resolve_expression(
self.query, allow_joins=True, reuse=None)
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql).group(1)
if (without_ordering, tuple(params)) in seen:
continue
seen.add((without_ordering, tuple(params)))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
select_sql = [t[1] for t in select]
if self.query.distinct and not self.query.distinct_fields:
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql).group(1)
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def __call__(self, name):
"""
Backwards-compatibility shim so that calling a SQLCompiler is equivalent to
calling its quote_name_unless_alias method.
"""
warnings.warn(
"Calling a SQLCompiler directly is deprecated. "
"Call compiler.quote_name_unless_alias instead.",
RemovedInDjango20Warning, stacklevel=2)
return self.quote_name_unless_alias(name)
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select or name in self.query.external_aliases):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
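    # Example (assuming the double-quote identifier style of SQLite/PostgreSQL):
    # a real table name comes back quoted, quote_name_unless_alias("auth_user")
    # == '"auth_user"', while a generated join alias such as "T3" or an
    # extra-select name is returned untouched so the SQL keeps referencing the
    # alias literally.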
def compile(self, node, select_format=False):
vendor_impl = getattr(node, 'as_' + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
if select_format:
return node.output_field.select_format(self, sql, params)
return sql, params
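    # Dispatch sketch: expressions may provide a vendor-specific hook following
    # the "as_<vendor>" pattern used above (e.g. as_postgresql(compiler,
    # connection)); compile() prefers that hook and falls back to the generic
    # as_sql() otherwise.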
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
# After executing the query, we must get rid of any joins the query
# setup created. So, take note of alias counts before the query ran.
# However we do not want to get rid of stuff done in pre_sql_setup(),
# as the pre_sql_setup will modify query state in a way that forbids
# another run of it.
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup()
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct' -- see
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()
where, w_params = self.compile(self.query.where)
having, h_params = self.compile(self.query.having)
params = []
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
out_cols = []
col_idx = 1
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias))
elif with_col_aliases:
s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result.append(', '.join(out_cols))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) is not implemented.")
if not order_by:
order_by = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if self.query.select_for_update and self.connection.features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError(
"select_for_update cannot be used outside of a transaction."
)
# If we've been asked for a NOWAIT query but the backend does
# not support it, raise a DatabaseError otherwise we could get
# an unexpected deadlock.
nowait = self.query.select_for_update_nowait
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
return ' '.join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None and not self.query.distinct_fields:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
return obj.get_compiler(connection=self.connection).as_sql()
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
        Returns a list of column expressions (one Col per concrete field),
        ready to be included in the SELECT clause.
"""
result = []
if opts is None:
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if from_parent and model is not None and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
        Returns a quoted list of fields to use in the DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _ = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
result.append("%s.%s" % (qn(alias), qn2(target.column)))
return result
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = True if order == 'DESC' else False
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless the attribute name
# of the field is specified.
if field.rel and path and opts.ordering and name != field.attname:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple(self.query.alias_map[j].table_name for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(OrderBy(t.get_col(alias), descending=descending), False) for t in targets]
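    # Behaviour sketch (hypothetical models): ordering by a relation name such
    # as "author", where Author declares Meta.ordering = ["name"], recurses via
    # the branch above and yields OrderBy expressions for author.name rather
    # than ordering on the raw foreign key column.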
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_order_by and get_distinct.
        Note that get_order_by and get_distinct must produce the same target
        columns on the same input, as the prefixes of get_order_by and
        get_distinct must match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, path = self.query.setup_joins(
pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
        need to be included. Subclasses can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
result.append(', %s' % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects if f.field.unique
)
return chain(direct_choices, reverse_choices)
related_klass_infos = []
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or (cur_depth == 1 and f.name in requested):
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s" % (
f.name,
", ".join(_get_field_choices()) or '(none)',
)
)
else:
next = False
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
klass_info = {
'model': f.rel.to,
'field': f,
'reverse': False,
'from_parent': False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(start_alias=alias, opts=f.rel.to._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(
select, f.rel.to._meta, alias, cur_depth + 1, next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
_, _, _, joins, _ = self.query.setup_joins([related_field_name], opts, root_alias)
alias = joins[-1]
from_parent = issubclass(model, opts.model)
klass_info = {
'model': model,
'field': f,
'reverse': True,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1,
next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested.keys()).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
'Invalid field name(s) given in select_related: %s. '
'Choices are: %s' % (
', '.join(invalid_fields),
', '.join(_get_field_choices()) or '(none)',
)
)
return related_klass_infos
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters, field_converters, expression)
return converters
def apply_converters(self, row, converters):
row = list(row)
for pos, (backend_converters, field_converters, field) in converters.items():
value = row[pos]
for converter in backend_converters:
value = converter(value, field, self.query.context)
for converter in field_converters:
value = converter(value, self.connection, self.query.context)
row[pos] = value
return tuple(row)
def results_iter(self, results=None):
"""
Returns an iterator over the results from executing this query.
"""
converters = None
if results is None:
results = self.execute_sql(MULTI)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
for rows in results:
for row in rows:
if converters:
row = self.apply_converters(row, converters)
yield row
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI):
"""
Run the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
        cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
if not result_type:
result_type = NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
cursor.close()
raise
if result_type == CURSOR:
# Caller didn't specify a result_type, so just give them back the
# cursor to process (and close).
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0:self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor, self.connection.features.empty_fetchmany_value,
self.col_count
)
if not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
if len(columns) == 1:
sql, params = self.as_sql()
return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
def __init__(self, *args, **kwargs):
self.return_id = False
super(SQLInsertCompiler, self).__init__(*args, **kwargs)
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self, self.connection)
else:
# Return the common case for the placeholder
return '%s'
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if has_fields:
params = values = [
[
f.get_db_prep_save(
getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True),
connection=self.connection
) for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
return [(" ".join(result), tuple(v for val in values for v in val))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
def execute_sql(self, return_id=False):
assert not (return_id and len(self.query.objs) != 1)
self.return_id = return_id
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False, for_save=True)
if val.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
elif hasattr(val, 'prepare_database_save'):
if field.rel:
val = val.prepare_database_save(field)
else:
raise TypeError("Database is trying to update a relational field "
"of type %s with a value of type %s. Make sure "
"you are setting the correct relations" %
(field.__class__.__name__, val.__class__.__name__))
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.clone(klass=Query)
query.select_related = False
query.clear_ordering(True)
query._extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super(SQLUpdateCompiler, self).pre_sql_setup()
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
self.query.reset_refcounts(refcounts_before)
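    # Net effect of the branches above: multi-table updates either pre-select
    # the affected primary keys and filter the update with pk__in=<id list>
    # (for backends that cannot select from the table being updated), or hand
    # the inner query straight to pk__in so the database resolves it in a
    # single statement.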
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
# Empty SQL for the inner query is a marker that the inner query
# isn't going to produce any results. This can happen when doing
# LIMIT 0 queries (generated by qs[:0]) for example.
if not self.query.subquery:
raise EmptyResultSet
sql, params = [], []
for annotation in self.query.annotation_select.values():
agg_sql, agg_params = self.compile(annotation)
sql.append(agg_sql)
params.extend(agg_params)
self.col_count = len(self.query.annotation_select)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
def cursor_iter(cursor, sentinel, col_count):
"""
Yields blocks of rows from a cursor and ensures the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[0:col_count] for r in rows]
finally:
cursor.close()
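# Typical use (illustrative sketch): the compiler passes the backend's sentinel
# for an exhausted fetchmany() so iteration stops cleanly, e.g.
#
#   for chunk in cursor_iter(cursor, connection.features.empty_fetchmany_value, col_count):
#       for row in chunk:
#           ...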
| 43.270815 | 98 | 0.5812 |
7944e3d03fe934f95a02930bf458e5876d5ff31f | 36,346 | py | Python | run.py | catenacyber/suricata-verify | e37080298f20e43c36101fc7176dd10b0d740017 | [
"MIT"
] | 1 | 2021-11-06T15:41:14.000Z | 2021-11-06T15:41:14.000Z | run.py | catenacyber/suricata-verify | e37080298f20e43c36101fc7176dd10b0d740017 | [
"MIT"
] | null | null | null | run.py | catenacyber/suricata-verify | e37080298f20e43c36101fc7176dd10b0d740017 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
#
# Copyright (C) 2017-2021 Open Information Security Foundation
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import sys
import os
import os.path
import subprocess
import threading
import shutil
import string
import argparse
import yaml
import glob
import re
import json
import unittest
import multiprocessing as mp
from collections import namedtuple
import threading
import filecmp
import yaml
WIN32 = sys.platform == "win32"
LINUX = sys.platform.startswith("linux")
suricata_bin = "src\\suricata.exe" if WIN32 else "./src/suricata"
suricata_yaml = "suricata.yaml" if WIN32 else "./suricata.yaml"
if LINUX:
manager = mp.Manager()
lock = mp.Lock()
failedLogs = manager.list()
count_dict = manager.dict()
check_args = manager.dict()
else:
failedLogs = []
count_dict = {}
check_args = {}
# Bring in a lock from threading to satisfy the MP semantics when
# not using MP.
lock = threading.Lock()
count_dict['passed'] = 0
count_dict['failed'] = 0
count_dict['skipped'] = 0
check_args['fail'] = 0
class SelfTest(unittest.TestCase):
def test_parse_suricata_version(self):
version = parse_suricata_version("4.0.0")
self.assertEqual(
(4, 0, 0), (version.major, version.minor, version.patch))
version = parse_suricata_version("444.444.444")
self.assertEqual(
(444, 444, 444), (version.major, version.minor, version.patch))
version = parse_suricata_version("4.1.0-dev")
self.assertEqual(
(4, 1, 0), (version.major, version.minor, version.patch))
version = parse_suricata_version("4")
self.assertEqual(
(4, 0, 0), (version.major, version.minor, version.patch))
version = parse_suricata_version("4.0.3")
self.assertEqual(
(4, 0, 3), (version.major, version.minor, version.patch))
def test_version_equal(self):
self.assertTrue(Version().is_equal(SuricataVersion(5, 0, 0), SuricataVersion(5, 0, 0)))
self.assertTrue(Version().is_equal(SuricataVersion(5, 1, 0), SuricataVersion(5, None, None)))
self.assertFalse(Version().is_equal(SuricataVersion(4, 1, 0), SuricataVersion(5, None, None)))
def test_version_lt(self):
comp = Version()
self.assertTrue(comp.is_lt(SuricataVersion(5, 0, 3), SuricataVersion(6, None, None)))
self.assertTrue(comp.is_lt(SuricataVersion(6, 0, 0), SuricataVersion(6, 0, 1)))
self.assertTrue(comp.is_lt(SuricataVersion(6, 0, 0), SuricataVersion(6, 1, 1)))
self.assertFalse(comp.is_lt(SuricataVersion(6, 1, 2), SuricataVersion(6, 1, 1)))
self.assertTrue(comp.is_lt(SuricataVersion(6, 0, 0), SuricataVersion(7, 0, 0)))
class TestError(Exception):
pass
class UnsatisfiedRequirementError(Exception):
pass
class TerminatePoolError(Exception):
pass
SuricataVersion = namedtuple(
"SuricataVersion", ["major", "minor", "patch"])
def parse_suricata_version(buf):
m = re.search("(?:Suricata version |^)(\d+)\.?(\d+)?\.?(\d+)?.*", str(buf).strip())
if m:
major = int(m.group(1)) if m.group(1) else 0
minor = int(m.group(2)) if m.group(2) else 0
patch = int(m.group(3)) if m.group(3) else 0
return SuricataVersion(
major=major, minor=minor, patch=patch)
return None
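
# A small usage sketch for the parser above. It is never called by the test
# runner; the "-V" banner text is an assumption, and only the
# "Suricata version X.Y.Z" part matters to the regex.
def _parse_suricata_version_examples():
    assert parse_suricata_version("Suricata version 6.0.4 RELEASE") == \
        SuricataVersion(major=6, minor=0, patch=4)
    assert parse_suricata_version("7") == SuricataVersion(major=7, minor=0, patch=0)
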
def get_suricata_version():
output = subprocess.check_output([suricata_bin, "-V"])
return parse_suricata_version(output)
def pipe_reader(fileobj, output=None, verbose=False):
for line in fileobj:
line = line.decode()
if output:
output.write(line)
output.flush()
if verbose:
print(line.strip())
def handle_exceptions(func):
def applicator(*args, **kwargs):
result = False
try:
result = func(*args,**kwargs)
except TestError as te:
print("===> {}: Sub test #{}: FAIL : {}".format(kwargs["test_name"], kwargs["test_num"], te))
check_args_fail()
kwargs["count"]["failure"] += 1
except UnsatisfiedRequirementError as ue:
print("===> {}: Sub test #{}: SKIPPED : {}".format(kwargs["test_name"], kwargs["test_num"], ue))
kwargs["count"]["skipped"] += 1
else:
if result:
kwargs["count"]["success"] += 1
else:
print("\n===> {}: Sub test #{}: FAIL : {}".format(kwargs["test_name"], kwargs["test_num"], kwargs["check"]["args"]))
kwargs["count"]["failure"] += 1
return kwargs["count"]
return applicator
class Version:
"""
Class to compare Suricata versions.
"""
def is_equal(self, a, b):
"""Check if version a and version b are equal in a semantic way.
For example:
- 4 would match 4, 4.x and 4.x.y.
- 4.0 would match 4.0.x.
- 4.0.3 would match only 4.0.3.
"""
if not a.major == b.major:
return False
if a.minor is not None and b.minor is not None:
if a.minor != b.minor:
return False
if a.patch is not None and b.patch is not None:
if a.patch != b.patch:
return False
return True
def is_gte(self, v1, v2):
"""Return True if v1 is great than or equal to v2."""
if v1.major < v2.major:
return False
elif v1.major > v2.major:
return True
if v1.minor < v2.minor:
return False
elif v1.minor > v2.minor:
return True
if v1.patch < v2.patch:
return False
return True
def is_lt(self, v1, v2):
    """Return True if v1 is less than v2."""
    if v1.major != v2.major:
        return v1.major < v2.major
    if v1.minor != v2.minor:
        return v1.minor < v2.minor
    return v1.patch < v2.patch
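
# Equality is deliberately fuzzy, as the is_equal() docstring describes: a
# missing minor/patch component acts as a wildcard. A short sketch (never called):
def _version_compare_examples():
    assert Version().is_equal(SuricataVersion(6, 0, 3), SuricataVersion(6, None, None))
    assert not Version().is_equal(SuricataVersion(6, 0, 3), SuricataVersion(6, 0, 2))
    assert Version().is_gte(SuricataVersion(6, 0, 3), SuricataVersion(6, 0, 0))
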
class SuricataConfig:
def __init__(self, version):
self.version = version
self.features = set()
self.config = {}
self.load_build_info()
def load_build_info(self):
output = subprocess.check_output([suricata_bin, "--build-info"])
start_support = False
for line in output.splitlines():
if line.decode().startswith("Features:"):
self.features = set(line.decode().split()[1:])
if "Suricata Configuration" in line.decode():
start_support = True
if start_support and "support:" in line.decode():
(fkey, val) = line.decode().split(" support:")
fkey = fkey.strip()
val = val.strip()
if val.startswith("yes"):
self.features.add(fkey)
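# The parser above expects "--build-info" output along these lines
# (illustrative excerpt, not verbatim):
#
#   Features: AF_PACKET HAVE_PACKET_FANOUT PCAP_SET_BUFF ...
#   Suricata Configuration:
#     AF_PACKET support:                       yes
#     Rust support:                            yes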
def load_config(self, config_filename):
output = subprocess.check_output([
suricata_bin,
"-c", config_filename,
"--dump-config"])
self.config = {}
for line in output.decode("utf-8").split("\n"):
parts = [p.strip() for p in line.split("=", 1)]
if parts and parts[0]:
if len(parts) > 1:
val = parts[1]
else:
val = ""
self.config[parts[0]] = val
def has_feature(self, feature):
return feature in self.features
def check_requires(requires, suricata_config: SuricataConfig):
suri_version = suricata_config.version
for key in requires:
if key == "min-version":
min_version = requires["min-version"]
if not is_version_compatible(version=min_version,
suri_version=suri_version, expr="gte"):
raise UnsatisfiedRequirementError(
"requires at least version {}".format(min_version))
elif key == "lt-version":
lt_version = requires["lt-version"]
if not is_version_compatible(version=lt_version,
suri_version=suri_version, expr="lt"):
raise UnsatisfiedRequirementError(
"for version less than {}".format(lt_version))
elif key == "version":
req_version = requires["version"]
if not is_version_compatible(version=req_version,
suri_version=suri_version, expr="equal"):
raise UnsatisfiedRequirementError(
"only for version {}".format(req_version))
elif key == "features":
for feature in requires["features"]:
if not suricata_config.has_feature(feature):
raise UnsatisfiedRequirementError(
"requires feature %s" % (feature))
elif key == "env":
for env in requires["env"]:
if not env in os.environ:
raise UnsatisfiedRequirementError(
"requires env var %s" % (env))
elif key == "files":
for filename in requires["files"]:
if not os.path.exists(filename):
raise UnsatisfiedRequirementError(
"requires file %s" % (filename))
elif key == "script":
for script in requires["script"]:
try:
subprocess.check_call("%s" % script, shell=True)
except:
raise UnsatisfiedRequirementError(
"requires script returned false")
elif key == "pcap":
# Handle below...
pass
else:
raise Exception("unknown requires types: %s" % (key))
def find_value(name, obj):
"""Find the value in an object for a field specified by name.
Example names:
event_type
alert.signature_id
smtp.rcpt_to[0]
"""
parts = name.split(".")
for part in parts:
if part == "__len":
# Get the length of the object. Return -1 if the object is
# not a type that has a length (numbers).
try:
return len(obj)
except:
return -1
name = None
index = None
m = re.match("^(.*)\[(\d+)\]$", part)
if m:
name = m.group(1)
index = m.group(2)
else:
name = part
if not name in obj:
return None
obj = obj[name]
if index is not None:
try:
obj = obj[int(index)]
except:
return None
return obj
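
# A small illustration of find_value() on a hand-made EVE-style record (the
# record below is made up for demonstration and is never read from a file).
def _find_value_examples():
    event = {"event_type": "alert",
             "alert": {"signature_id": 1},
             "smtp": {"rcpt_to": ["a@example.com", "b@example.com"]}}
    assert find_value("event_type", event) == "alert"
    assert find_value("alert.signature_id", event) == 1
    assert find_value("smtp.rcpt_to[0]", event) == "a@example.com"
    assert find_value("smtp.rcpt_to.__len", event) == 2
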
def is_version_compatible(version, suri_version, expr):
config_version = parse_suricata_version(version)
version_obj = Version()
func = getattr(version_obj, "is_{}".format(expr))
if not func(suri_version, config_version):
return False
return True
class FileCompareCheck:
def __init__(self, config, directory):
self.config = config
self.directory = directory
def run(self):
if WIN32:
print("skipping shell check on windows")
return True;
expected = os.path.join(self.directory, self.config["expected"])
filename = self.config["filename"]
if filecmp.cmp(expected, filename):
return True
else:
raise TestError("%s %s \nFAILED: verification failed" % (expected, filename))
class ShellCheck:
def __init__(self, config):
self.config = config
def run(self):
if not self.config or "args" not in self.config:
raise TestError("shell check missing args")
try:
if WIN32:
print("skipping shell check on windows")
return True;
output = subprocess.check_output(self.config["args"], shell=True)
if "expect" in self.config:
return str(self.config["expect"]) == output.decode().strip()
return True
except subprocess.CalledProcessError as err:
raise TestError(err)
class StatsCheck:
def __init__(self, config, outdir):
self.config = config
self.outdir = outdir
def run(self):
stats = None
with open("eve.json", "r") as fileobj:
for line in fileobj:
event = json.loads(line)
if event["event_type"] == "stats":
stats = event["stats"]
for key in self.config:
val = find_value(key, stats)
if val != self.config[key]:
raise TestError("stats.%s: expected %s; got %s" % (
key, str(self.config[key]), str(val)))
return True
class FilterCheck:
def __init__(self, config, outdir, suricata_config):
self.config = config
self.outdir = outdir
self.suricata_config = suricata_config
self.suri_version = suricata_config.version
def run(self):
requires = self.config.get("requires", {})
req_version = self.config.get("version")
min_version = self.config.get("min-version")
if req_version is not None:
requires["version"] = req_version
if min_version is not None:
requires["min-version"] = min_version
feature = self.config.get("feature")
if feature is not None:
requires["features"] = [feature]
check_requires(requires, self.suricata_config)
if "filename" in self.config:
json_filename = self.config["filename"]
else:
json_filename = "eve.json"
if not os.path.exists(json_filename):
raise TestError("%s does not exist" % (json_filename))
count = 0
with open(json_filename, "r") as fileobj:
for line in fileobj:
event = json.loads(line)
if self.match(event):
count += 1
if count == self.config["count"]:
return True
if "comment" in self.config:
raise TestError("%s: expected %d, got %d" % (
self.config["comment"], self.config["count"], count))
raise TestError("expected %d matches; got %d for filter %s" % (
self.config["count"], count, str(self.config)))
def match(self, event):
for key, expected in self.config["match"].items():
if key == "has-key":
val = find_value(expected, event)
if val is None:
return False
elif key == "not-has-key":
val = find_value(expected, event)
if val is not None:
return False
else:
val = find_value(key, event)
if val != expected:
if str(val) == str(expected):
print("Different types but same string", type(val), val, type(expected), expected)
return False
return False
return True
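
# An illustrative "checks" section of a test.yaml exercising the check classes
# above (the signature id, counter names, commands and file names are examples
# only):
#
#   checks:
#     - filter:
#         count: 1
#         match:
#           event_type: alert
#           alert.signature_id: 2100498
#     - stats:
#         detect.alert: 1
#     - shell:
#         args: wc -l < eve.json
#         expect: 12
#     - file-compare:
#         filename: output.txt
#         expected: expected-output.txt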
class TestRunner:
def __init__(self, cwd, directory, outdir, suricata_config, verbose=False, force=False):
self.cwd = cwd
self.directory = directory
self.suricata_config = suricata_config
self.verbose = verbose
self.force = force
self.output = outdir
# The name is just the directory name.
self.name = os.path.basename(self.directory)
# List of thread readers.
self.readers = []
# Load the test configuration.
self.config = None
self.load_config()
self.suricata_config.load_config(self.get_suricata_yaml_path())
def load_config(self):
if os.path.exists(os.path.join(self.directory, "test.yaml")):
self.config = yaml.safe_load(
open(os.path.join(self.directory, "test.yaml"), "rb"))
if self.config is None:
self.config = {}
def setup(self):
if "setup" in self.config:
for setup in self.config["setup"]:
for command in setup:
if command == "script":
subprocess.check_call(
"%s" % setup[command],
shell=True,
cwd=self.output)
def check_skip(self):
if not "skip" in self.config:
return
if isinstance(self.config["skip"], bool):
if self.config["skip"]:
raise UnsatisfiedRequirementError("skipped by default")
return
for skip in self.config["skip"]:
if "uid" in skip:
if WIN32:
raise UnsatisfiedRequirementError("uid based skip not supported on Windows")
if os.getuid() == skip["uid"]:
if "msg" in skip:
msg = skip["msg"]
else:
msg = "not for uid %d" % (skip["uid"])
raise UnsatisfiedRequirementError(msg)
if "feature" in skip:
if self.suricata_config.has_feature(skip["feature"]):
if "msg" in skip:
msg = skip["msg"]
else:
msg = "not for feature %s" % (skip["feature"])
raise UnsatisfiedRequirementError(msg)
if "config" in skip:
for pattern, need_val in skip["config"].items():
for key, val in self.suricata_config.config.items():
if re.match(pattern, key):
if str(need_val) == str(val):
raise UnsatisfiedRequirementError(
"not for %s = %s" % (
key, need_val))
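# An illustrative "skip" block as it might appear in a test.yaml (the feature
# and config key names are examples only):
#
#   skip:
#     - uid: 0
#       msg: not supported when running as root
#     - feature: RUST
#     - config:
#         app-layer\.protocols\.smb\.enabled: yes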
def check_requires(self):
requires = self.config.get("requires", {})
check_requires(requires, self.suricata_config)
# Check if a pcap is required or not. By default a pcap is
# required unless a "command" has been provided.
if not "command" in self.config:
if "pcap" in requires:
pcap_required = requires["pcap"]
else:
pcap_required = True
if pcap_required and not "pcap" in self.config:
if not glob.glob(os.path.join(self.directory, "*.pcap")) + \
glob.glob(os.path.join(self.directory, "*.pcapng")):
raise UnsatisfiedRequirementError("No pcap file found")
def run(self):
if not self.force:
self.check_requires()
self.check_skip()
if WIN32 and os.path.exists(os.path.join(self.directory, "check.sh")):
raise UnsatisfiedRequirementError("check.sh tests are not supported on Windows")
if WIN32 and "setup" in self.config:
raise UnsatisfiedRequirementError("test \"setup\" not supported on Windows")
shell = False
if "command" in self.config:
# on Windows skip 'command' tests
if WIN32:
raise UnsatisfiedRequirementError("\"command\" tests are not supported on Windows")
args = self.config["command"]
shell = True
else:
args = self.default_args()
extraenv = {
# The suricata source directory.
"SRCDIR": self.cwd,
"TZ": "UTC",
"TEST_DIR": self.directory,
"OUTPUT_DIR": self.output,
"ASAN_OPTIONS": "detect_leaks=0",
}
env = os.environ.copy()
env.update(extraenv)
if "count" in self.config:
count = self.config["count"]
else:
count = 1
if "exit-code" in self.config:
expected_exit_code = self.config["exit-code"]
else:
expected_exit_code = 0
for _ in range(count):
# Cleanup the output directory.
if os.path.exists(self.output):
shutil.rmtree(self.output)
os.makedirs(self.output)
self.setup()
stdout = open(os.path.join(self.output, "stdout"), "w")
stderr = open(os.path.join(self.output, "stderr"), "w")
if shell:
template = string.Template(args)
cmdline = template.substitute(env)
else:
cmdline = " ".join(args) + "\n"
open(os.path.join(self.output, "cmdline"), "w").write(cmdline)
p = subprocess.Popen(
args, shell=shell, cwd=self.directory, env=env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.start_reader(p.stdout, stdout)
self.start_reader(p.stderr, stderr)
for r in self.readers:
r.join()
r = p.wait()
if r != expected_exit_code:
raise TestError("got exit code %d, expected %d" % (
r, expected_exit_code));
check_value = self.check()
if check_value["check_sh"]:
return check_value
if not check_value["failure"] and not check_value["skipped"]:
print("===> %s: OK%s" % (os.path.basename(self.directory), " (%dx)" % count if count > 1 else ""))
elif not check_value["failure"]:
print("===> {}: OK (checks: {}, skipped: {})".format(os.path.basename(self.directory), sum(check_value.values()), check_value["skipped"]))
return check_value
def pre_check(self):
if "pre-check" in self.config:
subprocess.call(self.config["pre-check"], shell=True)
@handle_exceptions
def perform_filter_checks(self, check, count, test_num, test_name):
count = FilterCheck(check, self.output,
self.suricata_config).run()
return count
@handle_exceptions
def perform_shell_checks(self, check, count, test_num, test_name):
count = ShellCheck(check).run()
return count
@handle_exceptions
def perform_stats_checks(self, check, count, test_num, test_name):
count = StatsCheck(check, self.output).run()
return count
@handle_exceptions
def perform_file_compare_checks(self, check, count, test_num, test_name):
count = FileCompareCheck(check, self.directory).run()
return count
def reset_count(self, dictionary):
for k in dictionary.keys():
dictionary[k] = 0
def check(self):
pdir = os.getcwd()
os.chdir(self.output)
count = {
"success": 0,
"failure": 0,
"skipped": 0,
"check_sh": 0,
}
try:
self.pre_check()
if "checks" in self.config:
self.reset_count(count)
for check_count, check in enumerate(self.config["checks"]):
for key in check:
if key in ["filter", "shell", "stats", "file-compare"]:
func = getattr(self, "perform_{}_checks".format(key.replace("-","_")))
count = func(check=check[key], count=count,
test_num=check_count + 1, test_name=os.path.basename(self.directory))
else:
print("FAIL: Unknown check type: {}".format(key))
finally:
os.chdir(pdir)
if count["failure"] or count["skipped"]:
return count
# Old style check script.
pdir = os.getcwd()
os.chdir(self.output)
try:
if not os.path.exists(os.path.join(self.directory, "check.sh")):
success_c = count["success"]
# Covering cases like "tests/show-help" which do not have
# check.sh and/or no checks in test.yaml should be counted
# successful
count["success"] = 1 if not success_c else success_c
return count
extraenv = {
# The suricata source directory.
"SRCDIR": self.cwd,
"TZ": "UTC",
"TEST_DIR": self.directory,
"OUTPUT_DIR": self.output,
"TOPDIR": TOPDIR,
}
env = os.environ.copy()
env.update(extraenv)
r = subprocess.call(
[os.path.join(self.directory, "check.sh")], env=env)
if r != 0:
print("FAILED: verification failed")
count["failure"] = 1
count["check_sh"] = 1
return count
else:
count["success"] = 1
return count
finally:
os.chdir(pdir)
def default_args(self):
args = []
if self.suricata_config.valgrind:
suppression_opt = "--suppressions=%s" % os.path.join(self.cwd, "qa/valgrind.suppress")
args += [ "valgrind", "-v", "--error-exitcode=255", suppression_opt ]
args += [
os.path.join(self.cwd, "src/suricata"),
]
# Load args from config file.
if "args" in self.config:
assert(type(self.config["args"]) == type([]))
for arg in self.config["args"]:
args += re.split("\s", arg)
# In Suricata 5.0 the classification.config and
# reference.config were moved into the etc/ directory. For now
# check there and the top level directory to still support
# 4.1.
classification_configs = [
os.path.join(self.cwd, "etc", "classification.config"),
os.path.join(self.cwd, "classification.config"),
]
for config in classification_configs:
if os.path.exists(config):
args += ["--set", "classification-file=%s" % config]
break
reference_configs = [
os.path.join(self.cwd, "etc", "reference.config"),
os.path.join(self.cwd, "reference.config"),
]
for config in reference_configs:
if os.path.exists(config):
args += ["--set", "reference-config-file=%s" % config]
break
# Add other fixed arguments.
args += [
"--init-errors-fatal",
"-l", self.output,
]
if "ips" in self.name:
args.append("--simulate-ips")
args += ["-c", self.get_suricata_yaml_path()]
# Find pcaps.
if "pcap" in self.config:
args += ["-r", self.config["pcap"]]
else:
pcaps = glob.glob(os.path.join(self.directory, "*.pcap"))
pcaps += glob.glob(os.path.join(self.directory, "*.pcapng"))
if len(pcaps) > 1:
raise TestError("More than 1 pcap file found")
if pcaps:
args += ["-r", pcaps[0]]
# Find rules.
rules = glob.glob(os.path.join(self.directory, "*.rules"))
if not rules:
args.append("--disable-detection")
elif len(rules) == 1:
args += ["-S", rules[0]]
else:
raise TestError("More than 1 rule file found")
return args
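# For a typical directory test, the argument list built above ends up roughly
# like this (paths are illustrative; --simulate-ips and --disable-detection are
# added only when applicable):
#
#   ./src/suricata --set classification-file=etc/classification.config \
#       --set reference-config-file=etc/reference.config \
#       --init-errors-fatal -l <output dir> -c suricata.yaml \
#       -r test.pcap -S test.rules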
def get_suricata_yaml_path(self):
"""Return the path to the suricata.yaml that will be used for this
test."""
if os.path.exists(os.path.join(self.directory, "suricata.yaml")):
return os.path.join(self.directory, "suricata.yaml")
return os.path.join(self.cwd, "suricata.yaml")
def start_reader(self, input, output):
t = threading.Thread(
target=pipe_reader, args=(input, output, self.verbose))
t.start()
self.readers.append(t)
def check_args_fail():
if args.fail:
with lock:
check_args['fail'] = 1
def check_deps():
try:
cmd = "jq --version > nil" if WIN32 else "jq --version > /dev/null 2>&1"
subprocess.check_call(cmd, shell=True)
except:
print("error: jq is required")
return False
try:
cmd = "echo suricata | xargs > nil" if WIN32 else "echo | xargs > /dev/null 2>&1"
subprocess.check_call(cmd, shell=True)
except:
print("error: xargs is required")
return False
return True
def run_test(dirpath, args, cwd, suricata_config):
with lock:
if check_args['fail'] == 1:
raise TerminatePoolError()
name = os.path.basename(dirpath)
outdir = os.path.join(dirpath, "output")
if args.outdir:
outdir = os.path.join(os.path.realpath(args.outdir), name, "output")
test_runner = TestRunner(
cwd, dirpath, outdir, suricata_config, args.verbose, args.force)
try:
results = test_runner.run()
if results["failure"] > 0:
with lock:
count_dict["failed"] += 1
failedLogs.append(dirpath)
elif results["skipped"] > 0 and results["success"] == 0:
with lock:
count_dict["skipped"] += 1
elif results["success"] > 0:
with lock:
count_dict["passed"] += 1
except UnsatisfiedRequirementError as ue:
print("===> {}: SKIPPED: {}".format(os.path.basename(dirpath), ue))
with lock:
count_dict["skipped"] += 1
except TestError as te:
print("===> {}: FAILED: {}".format(os.path.basename(dirpath), te))
check_args_fail()
with lock:
count_dict["failed"] += 1
def run_mp(jobs, tests, dirpath, args, cwd, suricata_config):
print("Number of concurrent jobs: %d" % jobs)
pool = mp.Pool(jobs)
try:
for dirpath in tests:
pool.apply_async(run_test, args=(dirpath, args, cwd, suricata_config))
except TerminatePoolError:
pool.terminate()
pool.close()
pool.join()
def run_single(tests, dirpath, args, cwd, suricata_config):
try:
for dirpath in tests:
run_test(dirpath, args, cwd, suricata_config)
except TerminatePoolError:
sys.exit(1)
def main():
global TOPDIR
global args
if not check_deps():
return 1
parser = argparse.ArgumentParser(description="Verification test runner.")
parser.add_argument("-v", dest="verbose", action="store_true")
parser.add_argument("--force", dest="force", action="store_true",
help="Force running of skipped tests")
parser.add_argument("--fail", action="store_true",
help="Exit on test failure")
parser.add_argument("--testdir", action="store",
help="Runs tests from custom directory")
parser.add_argument("--exact", dest="exact", action="store_true",
help="Use supplied name to make an exact match")
parser.add_argument("--skip-tests", nargs="?", default=None,
help="Skip tests with a given pattern")
parser.add_argument("--outdir", action="store",
help="Outputs to custom directory")
parser.add_argument("--valgrind", dest="valgrind", action="store_true",
help="Run tests in with valgrind")
parser.add_argument("--self-test", action="store_true",
help="Run self tests")
parser.add_argument("--debug-failed", dest="debugfailed", action="store_true",
help="Prints debug output for failed tests")
parser.add_argument("patterns", nargs="*", default=[])
if LINUX:
parser.add_argument("-j", type=int, default=min(8, mp.cpu_count()),
help="Number of jobs to run")
args = parser.parse_args()
if args.self_test:
return unittest.main(argv=[sys.argv[0]])
TOPDIR = os.path.abspath(os.path.dirname(sys.argv[0]))
skipped = 0
passed = 0
failed = 0
# Get the current working directory, which should be the top
# suricata source directory.
cwd = os.getcwd()
if not (os.path.exists(suricata_yaml) and
os.path.exists(suricata_bin)):
print("error: this is not a suricata source directory or " +
"suricata is not built")
return 1
# Create a SuricataConfig object that is passed to all tests.
suricata_config = SuricataConfig(get_suricata_version())
suricata_config.valgrind = args.valgrind
tdir = os.path.join(TOPDIR, "tests")
if args.testdir:
tdir = os.path.abspath(args.testdir)
# First gather the tests so we can run them in alphabetic order.
tests = []
for dirpath, dirnames, filenames in os.walk(tdir, followlinks = True):
# The top directory is not a test...
if dirpath == os.path.join(TOPDIR, "tests"):
continue
if dirpath == tdir:
continue
basename = os.path.basename(dirpath)
if args.skip_tests:
skip_tests_opt = False
patterns = args.skip_tests.split(",")
for pattern in patterns:
if args.exact:
if pattern == basename:
skip_tests_opt = True
break
elif basename.find(pattern) > -1:
skip_tests_opt = True
break
if skip_tests_opt:
continue
# Check if there are sub-test directories
if "test.yaml" in filenames or "check.sh" in filenames:
# gets used by os.walk in this for loop
dirnames[0:] = []
else:
continue
if not args.patterns:
tests.append(dirpath)
else:
for pattern in args.patterns:
if args.exact:
if pattern == basename:
tests.append(dirpath)
elif basename.find(pattern) > -1:
tests.append(dirpath)
# Sort alphabetically.
tests.sort()
if LINUX:
run_mp(args.j, tests, dirpath, args, cwd, suricata_config)
else:
run_single(tests, dirpath, args, cwd, suricata_config)
passed = count_dict["passed"]
failed = count_dict["failed"]
skipped = count_dict["skipped"]
print("")
print("PASSED: %d" % (passed))
print("FAILED: %d" % (failed))
print("SKIPPED: %d" % (skipped))
if args.debugfailed:
if len(failedLogs) > 0:
print("")
print("Failed tests debug output:")
for dirpath in failedLogs:
print("- Test %s:" % os.path.basename(dirpath))
for r, d, f in os.walk(dirpath+"/output"):
for fname in f:
path = os.path.join(r, fname)
print(" - %s" % path)
try:
with open(path, "r") as fcontents:
try:
buf = fcontents.read().decode()
print(buf)
except:
print(" - [Not dumping file that won't utf-8 decode]")
except Exception as err:
print("Failed to open %s: %s" % (path, str(err)))
if failed > 0:
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
| 34.615238 | 150 | 0.550184 |
7944e440304ec0fffa3883a09b109910d6f6c5d1 | 4,507 | py | Python | pointnet2_rsmix/modelnet_h5_dataset_origin.py | yugeeklab/RSMix | 1cacbdd80dccd7cacd9702575b6f8ffdfa4a5887 | [
"MIT"
] | 31 | 2022-02-08T02:49:01.000Z | 2022-03-31T05:39:15.000Z | pointnet2_rsmix/modelnet_h5_dataset_origin.py | yugeeklab/RSMix | 1cacbdd80dccd7cacd9702575b6f8ffdfa4a5887 | [
"MIT"
] | 4 | 2021-06-25T06:29:30.000Z | 2022-03-04T05:18:35.000Z | pointnet2_rsmix/modelnet_h5_dataset_origin.py | yugeeklab/RSMix | 1cacbdd80dccd7cacd9702575b6f8ffdfa4a5887 | [
"MIT"
] | 2 | 2022-02-08T05:41:21.000Z | 2022-02-24T13:33:34.000Z | '''
ModelNet dataset. Support ModelNet40, XYZ channels. Up to 2048 points.
Faster IO than ModelNetDataset in the first epoch.
'''
import os
import sys
import numpy as np
import h5py
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
# Download dataset for point cloud classification
DATA_DIR = os.path.join(ROOT_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
os.system('wget %s; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def shuffle_data(data, labels):
""" Shuffle data and labels.
Input:
data: B,N,... numpy array
label: B,... numpy array
Return:
shuffled data, label and shuffle indices
"""
idx = np.arange(len(labels))
np.random.shuffle(idx)
return data[idx, ...], labels[idx], idx
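# A quick shape sketch (illustrative, not used by the loader): for data of
# shape (B, N, 3) and labels of shape (B,), the same permutation idx is applied
# to both, so
#
#   shuffled_data, shuffled_labels, idx = shuffle_data(data, labels)
#   # shuffled_data[i] == data[idx[i]] and shuffled_labels[i] == labels[idx[i]]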
def getDataFiles(list_filename):
return [line.rstrip() for line in open(list_filename)]
def load_h5(h5_filename):
f = h5py.File(h5_filename, 'r')
data = f['data'][:]
label = f['label'][:]
return (data, label)
def loadDataFile(filename):
return load_h5(filename)
class ModelNetH5Dataset(object):
def __init__(self, list_filename, batch_size = 32, npoints = 1024, shuffle=True):
self.list_filename = list_filename
self.batch_size = batch_size
self.npoints = npoints
self.shuffle = shuffle
self.h5_files = getDataFiles(self.list_filename)
self.reset()
def reset(self):
''' reset order of h5 files '''
self.file_idxs = np.arange(0, len(self.h5_files))
if self.shuffle: np.random.shuffle(self.file_idxs)
self.current_data = None
self.current_label = None
self.current_file_idx = 0
self.batch_idx = 0
def _augment_batch_data(self, batch_data):
rotated_data = provider.rotate_point_cloud(batch_data)
rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)
jittered_data = provider.random_scale_point_cloud(rotated_data[:,:,0:3])
jittered_data = provider.shift_point_cloud(jittered_data)
jittered_data = provider.jitter_point_cloud(jittered_data)
rotated_data[:,:,0:3] = jittered_data
return provider.shuffle_points(rotated_data)
def _get_data_filename(self):
return self.h5_files[self.file_idxs[self.current_file_idx]]
def _load_data_file(self, filename):
self.current_data,self.current_label = load_h5(filename)
self.current_label = np.squeeze(self.current_label)
self.batch_idx = 0
if self.shuffle:
self.current_data, self.current_label, _ = shuffle_data(self.current_data,self.current_label)
def _has_next_batch_in_file(self):
return self.batch_idx*self.batch_size < self.current_data.shape[0]
def num_channel(self):
return 3
def has_next_batch(self):
# TODO: add backend thread to load data
if (self.current_data is None) or (not self._has_next_batch_in_file()):
if self.current_file_idx >= len(self.h5_files):
return False
self._load_data_file(self._get_data_filename())
self.batch_idx = 0
self.current_file_idx += 1
return self._has_next_batch_in_file()
def next_batch(self, augment=False):
''' returned dimension may be smaller than self.batch_size '''
start_idx = self.batch_idx * self.batch_size
end_idx = min((self.batch_idx+1) * self.batch_size, self.current_data.shape[0])
bsize = end_idx - start_idx
batch_label = np.zeros((bsize), dtype=np.int32)
data_batch = self.current_data[start_idx:end_idx, 0:self.npoints, :].copy()
label_batch = self.current_label[start_idx:end_idx].copy()
self.batch_idx += 1
if augment: data_batch = self._augment_batch_data(data_batch)
return data_batch, label_batch
if __name__=='__main__':
d = ModelNetH5Dataset('data/modelnet40_ply_hdf5_2048/train_files.txt')
print(d.shuffle)
print(d.has_next_batch())
ps_batch, cls_batch = d.next_batch(True)
print(ps_batch.shape)
print(cls_batch.shape) | 35.769841 | 105 | 0.676947 |
7944e6571a168ee10f9ea5f23bd6a5513709a1cb | 5,198 | py | Python | sphinx/conf.py | nadavweidman/pytconf | 6203d3607c1cc383c60d1c138efc1109c7a6ab59 | [
"MIT"
] | null | null | null | sphinx/conf.py | nadavweidman/pytconf | 6203d3607c1cc383c60d1c138efc1109c7a6ab59 | [
"MIT"
] | 1 | 2021-12-03T11:35:46.000Z | 2021-12-03T11:52:52.000Z | sphinx/conf.py | nadavweidman/pytconf | 6203d3607c1cc383c60d1c138efc1109c7a6ab59 | [
"MIT"
] | 8 | 2021-12-03T11:07:55.000Z | 2022-03-23T13:35:05.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pytconf documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 13 12:44:22 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pytconf'
# noinspection PyShadowingBuiltins
copyright = '2017, Mark Veltzer'
author = 'Mark Veltzer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pytconf_doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pytconf.tex', 'pytconf Documentation',
'Mark Veltzer', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pytconf', 'pytconf Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pytconf', 'pytconf Documentation',
author, 'pytconf', 'One line description of project.',
'Miscellaneous'),
]
| 31.50303 | 79 | 0.680262 |
7944e883a5a1cc66cacb72a15349573492faf43e | 199 | py | Python | LeFrameworkTest.py | inXS212/LeFramework | 0d82a13d863aa6a99e8d8ffd195325d6ffb67bd1 | [
"MIT"
] | null | null | null | LeFrameworkTest.py | inXS212/LeFramework | 0d82a13d863aa6a99e8d8ffd195325d6ffb67bd1 | [
"MIT"
] | null | null | null | LeFrameworkTest.py | inXS212/LeFramework | 0d82a13d863aa6a99e8d8ffd195325d6ffb67bd1 | [
"MIT"
] | null | null | null | import unittest
from ._tests.test_rl_replays import *
from ._tests.test_exercices import *
from ._tests.test_vector import *
"""LeFramework unittests"""
if __name__ == '__main__':
unittest.main()
| 19.9 | 37 | 0.763819 |
7944e8d659d37f5bb80bb36c4313b3b586b90e97 | 1,004 | py | Python | PyEFVLib/BoundaryConditions.py | Gustavo029/GridReader | 7edc950c469b06c3de0093e5fd8bf6cfd59af354 | [
"MIT"
] | 1 | 2022-01-26T17:14:54.000Z | 2022-01-26T17:14:54.000Z | PyEFVLib/BoundaryConditions.py | Gustavo029/GridReader | 7edc950c469b06c3de0093e5fd8bf6cfd59af354 | [
"MIT"
] | null | null | null | PyEFVLib/BoundaryConditions.py | Gustavo029/GridReader | 7edc950c469b06c3de0093e5fd8bf6cfd59af354 | [
"MIT"
] | 3 | 2020-10-26T07:11:19.000Z | 2022-01-26T17:14:42.000Z | import numpy as np
from numpy import pi, sin, cos, tan, arcsin, arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, sqrt, e , log, exp, inf, mod, floor
def getFunction(expr):
def function(x,y,z,t):
return eval( expr.replace('x',str(x)).replace('y',str(y)).replace('z',str(z)).replace('t',str(t)) )
return function
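# Example (illustrative): getFunction("x*y + t") returns a callable f(x, y, z, t)
# such that f(2.0, 3.0, 0.0, 1.0) evaluates "2.0*3.0 + 1.0" == 7.0. Note the
# substitution is purely textual, so expressions are expected to use the plain
# x/y/z/t variable names.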
class _BoundaryConditionBase:
def __init__(self, grid, boundary, value, handle, expression=False):
self.grid = grid
self.boundary = boundary
self.value = value
self.handle = handle
self.expression = expression
if self.expression:
self.function = getFunction(value)
def getValue(self, index, time=0.0): # Index argument for future variable boundaryCondition
if self.expression:
x,y,z = self.grid.vertices[index].getCoordinates()
return self.function(x,y,z,time)
else:
return self.value
class DirichletBoundaryCondition(_BoundaryConditionBase):
__type__="DIRICHLET"
class NeumannBoundaryCondition(_BoundaryConditionBase):
__type__="NEUMANN"
| 31.375 | 141 | 0.737052 |
7944eb8f98ee40e61725b1b167db275435808303 | 1,199 | py | Python | pyscript/white_or_cozy_lights.py | paarak/home-assistant-config-2 | b56f56693c4377b715a0ea286dff397609ad203c | [
"MIT"
] | null | null | null | pyscript/white_or_cozy_lights.py | paarak/home-assistant-config-2 | b56f56693c4377b715a0ea286dff397609ad203c | [
"MIT"
] | null | null | null | pyscript/white_or_cozy_lights.py | paarak/home-assistant-config-2 | b56f56693c4377b715a0ea286dff397609ad203c | [
"MIT"
] | 1 | 2021-01-30T02:03:15.000Z | 2021-01-30T02:03:15.000Z | from homeassistant.const import EVENT_CALL_SERVICE
import statistics

def white_or_cozy(group_entity_id):
entity_ids = state.get_attr(group_entity_id)['entity_id']
attrs = [state.get_attr(entity_id) for entity_id in entity_ids]
if "xy_color" in attrs[0]:
xy_colors = [d['xy_color'] for d in attrs]
xs, ys = zip(*xy_colors)
x_std = statistics.stdev(xs)
y_std = statistics.stdev(ys)
col = "white" if x_std**2 + y_std**2 < 0.01 else "cozy"
else:
col = "white"
return col
def set_input_select(group_entity_id, input_select):
col = white_or_cozy(group_entity_id)
options = state.get_attr(input_select)["options"]
option = [opt for opt in options if col in opt][0]
state.set(input_select, option)
@event_trigger(EVENT_CALL_SERVICE, "domain == 'lights' and service == 'turn_on'")
def bedroom(domain=None, service=None, service_data=None):
input_select = "input_select.last_script_bedroom"
group_entity_id = "light.bedroom_lights"
set_input_select(group_entity_id, input_select)
# input_select = "input_select.last_script_bedroom"
# group_entity_id = "light.bedroom_lights"
# set_input_select(group_entity_id, input_select)
| 36.333333 | 81 | 0.717264 |
7944ebb402a3663769fe979c26a4a65bd035e828 | 3,644 | py | Python | src/GIS/Atlantis.py | patcmorneau/PecheFantome | 616ee6a7f01c7c63a034ced068a6a6e4f4698f2d | [
"MIT"
] | null | null | null | src/GIS/Atlantis.py | patcmorneau/PecheFantome | 616ee6a7f01c7c63a034ced068a6a6e4f4698f2d | [
"MIT"
] | 8 | 2020-02-19T20:03:44.000Z | 2022-02-03T19:27:24.000Z | src/GIS/Atlantis.py | patcmorneau/PecheFantome | 616ee6a7f01c7c63a034ced068a6a6e4f4698f2d | [
"MIT"
] | 3 | 2020-02-19T19:02:19.000Z | 2021-12-14T14:06:25.000Z | import GGlib
import mysql.connector
import gpxpy.gpx
from xml.etree import ElementTree as ET
import subprocess
#############################################################
# ****** ******* ** ****** **** ** ******** **********
# **////**/**////** **** /*////** /**/** /**/**///// /////**///
# ** // /** /** **//** /* /** /**//** /**/** /**
# /** /******* ** //** /****** /** //** /**/******* /**
# /** /**///** **********/*//// **/** //**/**/**//// /**
# //** **/** //** /**//////**/* /**/** //****/** /**
# //****** /** //**/** /**/******* /** //***/******** /**
# ////// // // // // /////// // /// //////// //
#
#
# __ __
# / <` '> \
# ( / @ @ \ )
# \(_ _\_/_ _)/
# (\ `-/ \-' /)
# "===\ /==="
# .==')___(`==.
# ' .=' `=.
#
##############################################################
#Ghost Gear database
class GGDB:
# default connection parameters
def __init__(self,host="cidco.ca",user="crabnet",password="crabnet213141$",database="crabnet",port = "3306"):
self.host = host
self.user = user
self.password = password
self.database = database
self.port = port
self.cnnx = mysql.connector.connect(host = host,
user = user,
password = password,
database = database,
)
def query(self,query):
#premade querys
if query == "trap" :
query = "SELECT * FROM crabnet.dfo_engins WHERE type='CASIER/CAGE - TRAP/POT'"
cursor = self.cnnx.cursor()
cursor.execute(query)
self.result = cursor.fetchall()
return self.result
else: #if query is not pre-made by CIDCO
try: # try custom query
cursor = self.cnnx.cursor()
cursor.execute(query)
self.result = cursor.fetchall()
return self.result
except mysql.connector.Error as err : #if query is not valid , print its error
print(err)
def dfo_engins2GPX(self, name, description):
gpx = gpxpy.gpx.GPX()
gpx.name = name
gpx.description = description
# For all trap
for trap in self.result:
longitude = trap[6]
latitude = trap[7]
waypoint = gpxpy.gpx.GPXWaypoint()
waypoint.longitude = longitude
waypoint.latitude = latitude
waypoint.name = "Casier {}".format(trap[0])
waypoint.description = trap[2]
gpx.waypoints.append(waypoint)
self.gpxResult = gpx.to_xml()
return self.gpxResult
def importSHP(self,shpFilePath):
db = "MYSQL:"+self.database+","+"host="+self.host+","+"user="+self.user+","+"password="+self.password+","+"port="+self.port
#ogr2ogr = "/usr/bin/ogr2ogr"
export = subprocess.Popen(["ogr2ogr", "-f", "MYSQL", db, "-a_srs", "EPSG:4326", shpFilePath])
if (export.wait() != 0):
return False
else:
return True
def extractSHP(self,table_name, polygon_name):
try:
query = "SELECT AsText(SHAPE) FROM "+table_name+" WHERE name ="+ polygon_name
cursor = self.cnnx.cursor()
cursor.execute(query) # execute the query
result = cursor.fetchall()
tupl = result[0]
str_coord = tupl[0]
if (str_coord.startswith("POLYGON")):
str_coord = str_coord[9:-2]
coordinates = str_coord.split(",")
self.list_coordinates = []
for pair in coordinates:
pair = list(pair.split(" "))
self.list_coordinates.append(pair)
return self.list_coordinates
except mysql.connector.Error as err : #if query is not valid , print its error
print(err)
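# Example usage sketch (illustrative; connection parameters, table contents and
# output paths are assumptions, not part of the original module):
#
#   db = GGDB(host="localhost", user="me", password="secret", database="crabnet")
#   traps = db.query("trap")
#   gpx_xml = db.dfo_engins2GPX("Ghost gear", "Known trap/pot positions")
#   with open("traps.gpx", "w") as gpx_file:
#       gpx_file.write(gpx_xml)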
| 33.431193 | 125 | 0.482986 |
7944ec9514320dcc44ff386069f5adfa3fa34a63 | 2,132 | py | Python | virtualenviroments/skal/app/__init__.py | noahbjohnson/CS330-Final-Project | 95a59559623dd4acc5e591201cc940c3d4b20cd2 | [
"MIT"
] | null | null | null | virtualenviroments/skal/app/__init__.py | noahbjohnson/CS330-Final-Project | 95a59559623dd4acc5e591201cc940c3d4b20cd2 | [
"MIT"
] | null | null | null | virtualenviroments/skal/app/__init__.py | noahbjohnson/CS330-Final-Project | 95a59559623dd4acc5e591201cc940c3d4b20cd2 | [
"MIT"
] | null | null | null | from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
import logging
import os
from logging.handlers import RotatingFileHandler, SMTPHandler
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_limiter import Limiter
# Initialize app and load configuration from file
app = Flask(__name__)
app.config.from_object(Config)
# Database references
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# User account references
login = LoginManager(app)
login.login_view = 'login'
# Mail settings
mail = Mail(app)
# Bootstrap references
bootstrap = Bootstrap(app)
# flask-moment references
moment = Moment(app)
# flask-limiter references
limiter = Limiter(app)
# Import app for rest of module
from app import routes, models, errors
# Error handling
if not app.debug:
# Email alert settings
if app.config['MAIL_SERVER']:
auth = None
if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
secure = None
if app.config['MAIL_USE_TLS']:
secure = ()
mail_handler = SMTPHandler(
mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
fromaddr='no-reply@' + app.config['MAIL_SERVER'],
toaddrs=app.config['ADMINS'], subject='Skal Failure',
credentials=auth, secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
# App Log settings
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/skal.log', maxBytes=10240,
backupCount=10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
# Startup console
app.logger.setLevel(logging.INFO)
app.logger.info('Skal startup')
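# For reference, the settings read above are expected to come from
# config.Config, roughly along these lines (values are placeholders; the real
# settings live in config.py):
#
#   class Config:
#       SQLALCHEMY_DATABASE_URI = 'sqlite:///app.db'
#       MAIL_SERVER = 'smtp.example.com'
#       MAIL_PORT = 587
#       MAIL_USE_TLS = True
#       MAIL_USERNAME = 'user'
#       MAIL_PASSWORD = 'secret'
#       ADMINS = ['admin@example.com']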
| 29.611111 | 79 | 0.699343 |
7944ed6319cf2215c3399016b98f7eda3496ffc7 | 840 | py | Python | link/urls.py | everydaycodings/Shorten-the-link | 6962e293df4465fdaa4e08fb641fb3121d9560b2 | [
"MIT"
] | 1 | 2020-10-14T07:43:02.000Z | 2020-10-14T07:43:02.000Z | link/urls.py | everydaycodings/Shorten-the-link | 6962e293df4465fdaa4e08fb641fb3121d9560b2 | [
"MIT"
] | 1 | 2020-10-13T13:24:21.000Z | 2020-10-13T13:24:21.000Z | link/urls.py | everydaycodings/Shorten-the-link | 6962e293df4465fdaa4e08fb641fb3121d9560b2 | [
"MIT"
] | null | null | null | """Link_shortern URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from link import views
urlpatterns = [
path('', views.index, name='index'),
path('result', views.result, name='result'),
]
| 35 | 77 | 0.708333 |
7944ed6f3ab2ed2276b00bb638d9af49317451a0 | 1,346 | py | Python | cheritest/trunk/tests/alu/test_addiu_overflow.py | tupipa/beri | cef1b41d52592cfa7454ddf59f9f2994e447cd66 | [
"Apache-2.0"
] | 36 | 2015-05-29T16:47:19.000Z | 2022-02-08T21:16:26.000Z | cheritest/trunk/tests/alu/test_addiu_overflow.py | tupipa/beri | cef1b41d52592cfa7454ddf59f9f2994e447cd66 | [
"Apache-2.0"
] | 2 | 2020-06-02T13:44:55.000Z | 2020-06-02T14:06:29.000Z | cheritest/trunk/tests/alu/test_addiu_overflow.py | tupipa/beri | cef1b41d52592cfa7454ddf59f9f2994e447cd66 | [
"Apache-2.0"
] | 15 | 2015-06-11T07:10:58.000Z | 2021-06-18T05:14:54.000Z | #-
# Copyright (c) 2011 Robert N. M. Watson
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
class test_addiu_overflow(BaseBERITestCase):
def test_handled(self):
self.assertRegisterEqual(self.MIPS.a2, 0, "addiu triggered overflow exception")
| 40.787879 | 87 | 0.771174 |
7944edf1f31499ecda7338eed22f3e46ac4e1fd7 | 15,778 | py | Python | bundle/vim-orgmode/tests/test_plugin_todo.py | ninegrid/dotfiles-vim | 4604f8a2e114cb2e98d5d79f2f41048c4f564b02 | [
"Unlicense"
] | null | null | null | bundle/vim-orgmode/tests/test_plugin_todo.py | ninegrid/dotfiles-vim | 4604f8a2e114cb2e98d5d79f2f41048c4f564b02 | [
"Unlicense"
] | null | null | null | bundle/vim-orgmode/tests/test_plugin_todo.py | ninegrid/dotfiles-vim | 4604f8a2e114cb2e98d5d79f2f41048c4f564b02 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
sys.path.append(u'../ftplugin')
import unittest
from orgmode.liborgmode.base import Direction
from orgmode.vimbuffer import VimBuffer
from orgmode.plugins.Todo import Todo
import vim
counter = 0
class TodoTestCase(unittest.TestCase):
u"""Tests all the functionality of the TODO module."""
def setUp(self):
# set content of the buffer
global counter
counter += 1
vim.EVALHISTORY = []
vim.EVALRESULTS = {
# no org_todo_keywords for b
u'exists("b:org_todo_keywords")'.encode(u'utf-8'): '0'.encode(u'utf-8'),
# global values for org_todo_keywords
u'exists("g:org_todo_keywords")'.encode(u'utf-8'): '1'.encode(u'utf-8'),
u'g:org_todo_keywords'.encode(u'utf-8'): [u'TODO'.encode(u'utf-8'), u'DONE'.encode(u'utf-8'), u'|'.encode(u'utf-8')],
u'exists("g:org_debug")'.encode(u'utf-8'): u'0'.encode(u'utf-8'),
u'exists("b:org_debug")'.encode(u'utf-8'): u'0'.encode(u'utf-8'),
u'exists("*repeat#set()")'.encode(u'utf-8'): u'0'.encode(u'utf-8'),
u'b:changedtick'.encode(u'utf-8'): (u'%d' % counter).encode(u'utf-8'),
u"v:count".encode(u'utf-8'): u'0'.encode(u'utf-8')
}
vim.current.buffer[:] = [ i.encode(u'utf-8') for i in u"""
* Heading 1
** Text 1
*** Text 2
* Text 1
** Text 1
some text that is
no heading
""".split(u'\n') ]
# toggle
def test_toggle_todo_with_no_heading(self):
# nothing should happen
vim.current.window.cursor = (1, 0)
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[0], u'')
# and repeat it -> it should not change
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[0], u'')
def test_todo_toggle_NOTODO(self):
vim.current.window.cursor = (2, 0)
vim.current.buffer[1] = u'** NOTODO Überschrift 1.1'.encode(u'utf-8')
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[1], u'** TODO NOTODO Überschrift 1.1'.encode(u'utf-8'))
def test_toggle_todo_in_heading_with_no_todo_state_different_levels(self):
# level 1
vim.current.window.cursor = (2, 0)
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[1], u'* TODO Heading 1')
self.assertEqual((2, 0), vim.current.window.cursor)
# level 2
vim.current.window.cursor = (3, 0)
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[2], u'** TODO Text 1')
# level 2
vim.current.window.cursor = (4, 4)
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[3], u'*** TODO Text 2')
self.assertEqual((4, 9), vim.current.window.cursor)
def test_circle_through_todo_states(self):
# * Heading 1 -->
# * TODO Heading 1 -->
# * DONE Heading 1 -->
# * Heading 1 -->
# * TODO Heading 1 -->
# * DONE Heading 1
vim.current.window.cursor = (2, 6)
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[1], u'* TODO Heading 1')
self.assertEqual((2, 11), vim.current.window.cursor)
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[1], u'* DONE Heading 1')
self.assertEqual((2, 11), vim.current.window.cursor)
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[1], u'* Heading 1')
self.assertEqual((2, 6), vim.current.window.cursor)
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[1], u'* TODO Heading 1')
self.assertEqual((2, 11), vim.current.window.cursor)
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[1], u'* DONE Heading 1')
self.assertEqual((2, 11), vim.current.window.cursor)
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[1], u'* Heading 1')
self.assertEqual((2, 6), vim.current.window.cursor)
def test_circle_through_todo_states_with_more_states(self):
# * Heading 1 -->
# * TODO Heading 1 -->
# * STARTED Heading 1 -->
# * DONE Heading 1 -->
# * Heading 1 -->
vim.EVALRESULTS[u'g:org_todo_keywords'.encode(u'utf-8')] = [u'TODO'.encode(u'utf-8'), u'STARTED'.encode(u'utf-8'), u'DONE'.encode(u'utf-8'),
u'|'.encode(u'utf-8')]
vim.current.window.cursor = (2, 0)
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[1], u'* TODO Heading 1')
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[1], u'* STARTED Heading 1')
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[1], u'* DONE Heading 1')
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[1], u'* Heading 1')
def test_toggle_todo_with_cursor_in_text_not_heading(self):
# nothing should happen
vim.current.window.cursor = (7, 0)
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[5], u'** TODO Text 1')
self.assertEqual(vim.current.window.cursor, (7, 0))
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[5], u'** DONE Text 1')
self.assertEqual(vim.current.window.cursor, (7, 0))
Todo.toggle_todo_state()
self.assertEqual(vim.current.buffer[5], u'** Text 1')
self.assertEqual(vim.current.window.cursor, (7, 0))
# get_states
def test_get_states_without_seperator(self):
u"""The last element in the todostates shouold be used as DONE-state when no sperator is given"""
vim.EVALRESULTS[u'g:org_todo_keywords'.encode(u'utf-8')] = [u'TODO'.encode(u'utf-8'), u'DONE'.encode(u'utf-8')]
states_todo, states_done = VimBuffer().get_todo_states()[0]
expected_todo, expected_done = [u'TODO'], [u'DONE']
self.assertEqual(states_todo, expected_todo)
self.assertEqual(states_done, expected_done)
vim.EVALRESULTS[u'g:org_todo_keywords'.encode(u'utf-8')] = [u'TODO'.encode(u'utf-8'), u'INPROGRESS'.encode(u'utf-8'), u'DONE'.encode(u'utf-8')]
states_todo, states_done = VimBuffer().get_todo_states()[0]
expected_todo = [u'TODO', u'INPROGRESS']
expected_done = [u'DONE']
self.assertEqual(states_todo, expected_todo)
self.assertEqual(states_done, expected_done)
vim.EVALRESULTS[u'g:org_todo_keywords'.encode(u'utf-8')] = [u'TODO'.encode(u'utf-8'), u'INPROGRESS'.encode(u'utf-8'),
u'DUMMY'.encode(u'utf-8'), u'DONE'.encode(u'utf-8')]
states_todo, states_done = VimBuffer().get_todo_states()[0]
expected_todo = [u'TODO', u'INPROGRESS', u'DUMMY']
expected_done = [u'DONE']
self.assertEqual(states_todo, expected_todo)
self.assertEqual(states_done, expected_done)
def test_get_states_with_seperator(self):
vim.EVALRESULTS[u'g:org_todo_keywords'.encode(u'utf-8')] = [u'TODO'.encode(u'utf-8'), u'|'.encode(u'utf-8'), u'DONE'.encode(u'utf-8')]
states_todo, states_done = VimBuffer().get_todo_states()[0]
expected_todo = [u'TODO']
expected_done = [u'DONE']
self.assertEqual(states_todo, expected_todo)
self.assertEqual(states_done, expected_done)
vim.EVALRESULTS[u'g:org_todo_keywords'.encode(u'utf-8')] = [u'TODO'.encode(u'utf-8'), u'INPROGRESS'.encode(u'utf-8'), u'|'.encode(u'utf-8'),
u'DONE'.encode(u'utf-8')]
states_todo, states_done = VimBuffer().get_todo_states()[0]
expected_todo = [u'TODO', u'INPROGRESS']
expected_done = [u'DONE']
self.assertEqual(states_todo, expected_todo)
self.assertEqual(states_done, expected_done)
vim.EVALRESULTS[u'g:org_todo_keywords'.encode(u'utf-8')] = [u'TODO'.encode(u'utf-8'), u'INPROGRESS'.encode(u'utf-8'),
u'DUMMY'.encode(u'utf-8'), u'|'.encode(u'utf-8'), u'DONE'.encode(u'utf-8')]
states_todo, states_done = VimBuffer().get_todo_states()[0]
expected_todo = [u'TODO', u'INPROGRESS', u'DUMMY']
expected_done = [u'DONE']
self.assertEqual(states_todo, expected_todo)
self.assertEqual(states_done, expected_done)
vim.EVALRESULTS[u'g:org_todo_keywords'.encode(u'utf-8')] = [u'TODO'.encode(u'utf-8'), u'INPROGRESS'.encode(u'utf-8'),
u'DUMMY'.encode(u'utf-8'), u'|'.encode(u'utf-8'), u'DELEGATED'.encode(u'utf-8'), u'DONE'.encode(u'utf-8')]
states_todo, states_done = VimBuffer().get_todo_states()[0]
expected_todo =[u'TODO', u'INPROGRESS', u'DUMMY']
expected_done = [u'DELEGATED', u'DONE']
self.assertEqual(states_todo, expected_todo)
self.assertEqual(states_done, expected_done)
vim.EVALRESULTS[u'g:org_todo_keywords'.encode(u'utf-8')] = [u'TODO'.encode(u'utf-8'), u'|'.encode(u'utf-8'), u'DONEX'.encode(u'utf-8'),
u'DUMMY'.encode(u'utf-8'), u'DELEGATED'.encode(u'utf-8'), u'DONE'.encode(u'utf-8')]
states_todo, states_done = VimBuffer().get_todo_states()[0]
expected_todo = [u'TODO']
expected_done = [u'DONEX', u'DUMMY', u'DELEGATED', u'DONE']
self.assertEqual(states_todo, expected_todo)
self.assertEqual(states_done, expected_done)
vim.EVALRESULTS[u'g:org_todo_keywords'.encode(u'utf-8')] = [[u'TODO(t)'.encode(u'utf-8'), u'|'.encode(u'utf-8'), u'DONEX'.encode(u'utf-8')],
[u'DUMMY'.encode(u'utf-8'), u'DELEGATED'.encode(u'utf-8'), u'DONE'.encode(u'utf-8')]]
states_todo, states_done = VimBuffer().get_todo_states()[0]
expected_todo = [u'TODO']
expected_done = [u'DONEX']
self.assertEqual(states_todo, expected_todo)
self.assertEqual(states_done, expected_done)
# get_next_state
def test_get_next_state_with_no_current_state(self):
states = [((u'TODO', ), (u'DONE', ))]
current_state = u''
self.assertEquals(Todo._get_next_state(current_state, states), u'TODO')
states = [((u'TODO', u'NEXT'), (u'DELEGATED', u'DONE'))]
self.assertEquals(Todo._get_next_state(current_state, states), u'TODO')
states = [((u'NEXT', ), (u'DELEGATED', u'DONE'))]
self.assertEquals(Todo._get_next_state(current_state, states), u'NEXT')
def test_get_next_state_backward_with_no_current_state(self):
states = [((u'TODO', ), (u'DONE', ))]
current_state = u''
self.assertEquals(Todo._get_next_state(current_state, states,
Direction.BACKWARD), u'DONE')
states = [((u'TODO', u'NEXT'), (u'DELEGATED', u'DONE'))]
self.assertEquals(Todo._get_next_state(current_state, states,
Direction.BACKWARD), u'DONE')
states = [((u'NEXT', ), (u'DELEGATED', u'DONE'))]
self.assertEquals(Todo._get_next_state(current_state, states,
Direction.BACKWARD), u'DONE')
def test_get_next_state_with_invalid_current_state(self):
states = [((u'TODO', ), (u'DONE', ))]
current_state = u'STI'
self.assertEquals(Todo._get_next_state(current_state, states), u'TODO')
states = [((u'TODO', u'NEXT'), (u'DELEGATED', u'DONE'))]
self.assertEquals(Todo._get_next_state(current_state, states), u'TODO')
states = [((u'NEXT', ), (u'DELEGATED', u'DONE'))]
self.assertEquals(Todo._get_next_state(current_state, states), u'NEXT')
def test_get_next_state_backward_with_invalid_current_state(self):
states = [((u'TODO', ), (u'DONE', ))]
current_state = u'STI'
result = Todo._get_next_state(current_state, states,
Direction.BACKWARD)
self.assertEquals(result, u'DONE')
states = [((u'TODO', u'NEXT'), (u'DELEGATED', u'DONE'))]
result = Todo._get_next_state(current_state, states,
Direction.BACKWARD)
self.assertEquals(result, u'DONE')
states = [((u'NEXT', ), (u'DELEGATED', u'DONE'))]
result = Todo._get_next_state(current_state, states,
Direction.BACKWARD)
self.assertEquals(result, u'DONE')
def test_get_next_state_with_current_state_equals_todo_state(self):
states = [((u'TODO', u'NEXT', u'NOW'), (u'DELEGATED', u'DONE'))]
current_state = u'TODO'
self.assertEquals(Todo._get_next_state(current_state, states), u'NEXT')
current_state = u'NEXT'
self.assertEquals(Todo._get_next_state(current_state, states), u'NOW')
def test_get_next_state_backward_with_current_state_equals_todo_state(self):
states = [((u'TODO', u'NEXT', u'NOW'), (u'DELEGATED', u'DONE'))]
current_state = u'TODO'
result = Todo._get_next_state(current_state, states,
Direction.BACKWARD)
self.assertEquals(result, None)
def test_get_next_state_backward_misc(self):
states = [((u'TODO', u'NEXT', u'NOW'), (u'DELEGATED', u'DONE'))]
current_state = u'DONE'
result = Todo._get_next_state(current_state, states,
Direction.BACKWARD)
self.assertEquals(result, u'DELEGATED')
current_state = u'DELEGATED'
result = Todo._get_next_state(current_state, states,
Direction.BACKWARD)
self.assertEquals(result, u'NOW')
current_state = u'NOW'
result = Todo._get_next_state(current_state, states,
Direction.BACKWARD)
self.assertEquals(result, u'NEXT')
current_state = u'NEXT'
result = Todo._get_next_state(current_state, states,
Direction.BACKWARD)
self.assertEquals(result, u'TODO')
current_state = u'TODO'
result = Todo._get_next_state(current_state, states,
Direction.BACKWARD)
self.assertEquals(result, None)
current_state = None
result = Todo._get_next_state(current_state, states,
Direction.BACKWARD)
self.assertEquals(result, u'DONE')
def test_get_next_state_with_jump_from_todo_to_done(self):
states = [((u'TODO', u'NEXT', u'NOW'), (u'DELEGATED', u'DONE'))]
current_state = u'NOW'
self.assertEquals(Todo._get_next_state(current_state, states), u'DELEGATED')
def test_get_next_state_with_jump_from_done_to_todo(self):
states = [((u'TODO', u'NEXT', u'NOW'), (u'DELEGATED', u'DONE'))]
current_state = u'DONE'
self.assertEquals(Todo._get_next_state(current_state, states), None)
def test_get_next_state_in_current_sequence(self):
states = [((u'TODO', u'NEXT', u'NOW'), (u'DELEGATED', u'DONE')), ((u'QA', ), (u'RELEASED', ))]
current_state = u'QA'
result = Todo._get_next_state(current_state, states,
Direction.FORWARD)
self.assertEquals(result, u'RELEASED')
def test_get_next_state_in_current_sequence_with_access_keys(self):
states = [((u'TODO(t)', u'NEXT(n)', u'NOW(w)'), (u'DELEGATED(g)', u'DONE(d)')), ((u'QA(q)', ), (u'RELEASED(r)', ))]
current_state = u'QA'
result = Todo._get_next_state(current_state, states,
Direction.FORWARD)
self.assertEquals(result, u'RELEASED')
current_state = u'NEXT'
result = Todo._get_next_state(current_state, states,
Direction.FORWARD)
self.assertEquals(result, u'NOW')
current_state = u'TODO'
result = Todo._get_next_state(current_state, states,
Direction.BACKWARD)
self.assertEquals(result, None)
current_state = None
result = Todo._get_next_state(current_state, states,
Direction.BACKWARD)
self.assertEquals(result, u'DONE')
def test_get_next_keyword_sequence(self):
states = [((u'TODO(t)', u'NEXT(n)', u'NOW(w)'), (u'DELEGATED(g)', u'DONE(d)')), ((u'QA(q)', ), (u'RELEASED(r)', ))]
current_state = None
result = Todo._get_next_state(current_state, states,
Direction.FORWARD, next_set=True)
self.assertEquals(result, u'TODO')
current_state = None
result = Todo._get_next_state(current_state, states,
Direction.BACKWARD, next_set=True)
self.assertEquals(result, None)
current_state = u'TODO'
result = Todo._get_next_state(current_state, states,
Direction.BACKWARD, next_set=True)
self.assertEquals(result, u'TODO')
current_state = u'TODO'
result = Todo._get_next_state(current_state, states,
Direction.FORWARD, next_set=True)
self.assertEquals(result, u'QA')
current_state = u'NOW'
result = Todo._get_next_state(current_state, states,
Direction.FORWARD, next_set=True)
self.assertEquals(result, u'QA')
current_state = u'DELEGATED'
result = Todo._get_next_state(current_state, states,
Direction.FORWARD, next_set=True)
self.assertEquals(result, u'QA')
current_state = u'QA'
result = Todo._get_next_state(current_state, states,
Direction.BACKWARD, next_set=True)
self.assertEquals(result, u'TODO')
current_state = u'QA'
result = Todo._get_next_state(current_state, states,
Direction.FORWARD, next_set=True)
self.assertEquals(result, u'QA')
current_state = u'RELEASED'
result = Todo._get_next_state(current_state, states,
Direction.FORWARD, next_set=True)
self.assertEquals(result, u'RELEASED')
current_state = u'RELEASED'
result = Todo._get_next_state(current_state, states,
Direction.BACKWARD, next_set=True)
self.assertEquals(result, u'TODO')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(TodoTestCase)
# vi: noexpandtab
| 37.300236 | 145 | 0.706427 |
7944eee51c305e00c4058d7329d06dee4a367ed9 | 1,571 | py | Python | quartical/config/internal.py | JSKenyon/QuartiCal | 2113855b080cfecc4a1c77cc9dad346ef3619716 | [
"MIT"
] | null | null | null | quartical/config/internal.py | JSKenyon/QuartiCal | 2113855b080cfecc4a1c77cc9dad346ef3619716 | [
"MIT"
] | null | null | null | quartical/config/internal.py | JSKenyon/QuartiCal | 2113855b080cfecc4a1c77cc9dad346ef3619716 | [
"MIT"
] | 1 | 2022-03-18T14:30:04.000Z | 2022-03-18T14:30:04.000Z | from dataclasses import make_dataclass
from pathlib import Path
from quartical.config.external import Gain
def gains_to_chain(opts):
terms = opts.solver.terms
Chain = make_dataclass(
"Chain",
[(t, Gain, Gain()) for t in terms]
)
chain = Chain()
for t in terms:
setattr(chain, t, getattr(opts, t))
return chain
def yield_from(obj, flds=None, name=True):
flds = (flds,) if isinstance(flds, str) else flds
for k in obj.__dataclass_fields__.keys():
if flds is None:
yield k
elif name:
yield (k, *(getattr(getattr(obj, k), fld) for fld in flds))
else:
yield (*(getattr(getattr(obj, k), fld) for fld in flds),)
def additional_validation(config):
chain = gains_to_chain(config)
root_path = Path(config.output.directory).absolute()
if root_path.exists() and not config.output.overwrite:
raise FileExistsError(f"{root_path} already exists. Specify "
f"output.overwrite=1 to suppress this "
f"error and overwrite *.qc files/folders.")
gain_path = root_path / Path("gains.qc")
load_dirs = [Path(lf).absolute().parent
for _, lf in yield_from(chain, "load_from") if lf]
msg = (
f"Output directory {str(gain_path)} contains terms which will be "
f"loaded/interpolated. This is not supported. Please specify a "
f"different output directory."
)
assert all(gain_path != ld for ld in load_dirs), msg
return
| 26.183333 | 74 | 0.614895 |
7944ef76f32c41ed33d1f7bb795ef0130db7d83e | 4,721 | py | Python | teslarent/management/commands/rental_start_end.py | flarno11/teslarent | 388b5fefdc7bca0a460c5db5ea24b233467e5183 | [
"Apache-2.0"
] | null | null | null | teslarent/management/commands/rental_start_end.py | flarno11/teslarent | 388b5fefdc7bca0a460c5db5ea24b233467e5183 | [
"Apache-2.0"
] | 13 | 2020-02-11T21:56:50.000Z | 2022-02-17T14:37:34.000Z | teslarent/management/commands/rental_start_end.py | flarno11/teslarent | 388b5fefdc7bca0a460c5db5ea24b233467e5183 | [
"Apache-2.0"
] | 2 | 2019-06-27T20:38:48.000Z | 2019-06-27T21:15:54.000Z | import datetime
import threading
import logging
import time
import queue
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from teslarent.models import Rental, VehicleData
from teslarent.teslaapi import teslaapi
from teslarent.utils.Singleton import Singleton
log = logging.getLogger('backgroundTask')
@Singleton
class BackgroundTask:
thread = None
_initialized_at = None
def __init__(self):
log.debug('BackgroundTask created')
self.queue = queue.Queue()
def ensure_thread_running(self):
if not self.thread:
self.thread = threading.Thread(target=self.background_process, args=(), kwargs={})
self.thread.setDaemon(False) # allow Django to exit while background thread is still running
self.thread.start()
self._initialized_at = timezone.now()
else:
self.queue.put(None)
def background_process(self):
log.debug('background_process self.thread.name=' + self.thread.name)
while True:
next_rental_at = Rental.get_next_rental_start_or_end_time(timezone.now())
if not next_rental_at:
log.debug('no next rental found, leaving for now')
break
now = timezone.now()
if next_rental_at < now:
log.error('next rental is in the past. now=%s, next_rental_at=%s' % (str(now), str(next_rental_at)))
break
diff = (next_rental_at - now).total_seconds()
log.info('next rental at %s, sleeping for %d secs' % (str(next_rental_at), diff))
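            # queue.get() with a timeout doubles as an interruptible sleep:
            # ensure_thread_running() puts a sentinel on the queue to wake this
            # thread early whenever the rental schedule may have changed.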
try:
self.queue.get(timeout=diff)
log.debug('interrupted by external event')
except queue.Empty:
log.debug('interrupted by timeout')
Command().update_rentals(timezone.now())
log.debug('exiting thread')
self.thread = None
@property
def initialized_at(self):
return self._initialized_at
class Command(BaseCommand):
help = 'Update started and ended rentals'
def add_arguments(self, parser):
#parser.add_argument('poll_id', nargs='+', type=int)
pass
def handle(self, *args, **options):
self.update_rentals(timezone.now())
@staticmethod
def update_rentals(date):
five_minutes_ago = date - datetime.timedelta(minutes=5)
rentals_start = list(Rental.objects.filter(start__gte=five_minutes_ago, start__lte=date))
rentals_end = list(Rental.objects.filter(end__gte=five_minutes_ago, end__lte=date))
vehicles = set()
for rental in rentals_start:
if not rental.odometer_start:
vehicles.add(rental.vehicle)
for rental in rentals_end:
if not rental.odometer_end:
vehicles.add(rental.vehicle)
vehicles = {vehicle.id: {'vehicle': vehicle} for vehicle in vehicles}
log.debug('update rentals for vehicles=%s rentals_start=%s rentals_end=%s' % (str(vehicles), str(rentals_start), str(rentals_end)))
for d in vehicles.values():
vehicle = d['vehicle']
call_command('fetch_vehicles_data', wakeup=True, vehicle_id=vehicle.id)
latest_vehicle_data = VehicleData.objects.filter(vehicle=vehicle).order_by('-created_at')[0]
diff = (timezone.now() - latest_vehicle_data.created_at).total_seconds()
if diff < 20:
d['latest_vehicle_data'] = latest_vehicle_data
else:
                log.error('no latest vehicle data for vehicle %d diff=%s' % (vehicle.id, str(diff)))
for rental in rentals_start:
if not rental.odometer_start and 'latest_vehicle_data' in vehicles[rental.vehicle.id]:
latest_vehicle_data = vehicles[rental.vehicle.id]['latest_vehicle_data']
rental.odometer_start = int(round(latest_vehicle_data.vehicle_state__odometer, 0))
rental.odometer_start_updated_at = date
log.info('update rentals for rental %d start=%s' % (rental.id, str(rental.start)))
rental.save()
for rental in rentals_end:
if not rental.odometer_end and 'latest_vehicle_data' in vehicles[rental.vehicle.id]:
latest_vehicle_data = vehicles[rental.vehicle.id]['latest_vehicle_data']
rental.odometer_end = int(round(latest_vehicle_data.vehicle_state__odometer, 0))
rental.odometer_end_updated_at = date
log.info('update rentals for rental %d end=%s' % (rental.id, str(rental.end)))
rental.save()
| 38.382114 | 139 | 0.643508 |
7944efab601d059a61d1aeed03a1670b12016fbb | 4,809 | py | Python | cattledb/commands/base.py | reiterd/cattledb | 015214afa5c3b1e94b555b138334163068aaf982 | [
"MIT"
] | 1 | 2021-02-26T14:52:40.000Z | 2021-02-26T14:52:40.000Z | cattledb/commands/base.py | reiterd/cattledb | 015214afa5c3b1e94b555b138334163068aaf982 | [
"MIT"
] | 6 | 2020-04-30T19:20:28.000Z | 2021-03-09T15:10:50.000Z | cattledb/commands/base.py | reiterd/cattledb | 015214afa5c3b1e94b555b138334163068aaf982 | [
"MIT"
] | 5 | 2020-05-04T09:10:33.000Z | 2021-07-20T15:04:50.000Z | #!/usr/bin/python
# coding: utf-8
import os
import click
import time
from ..core.models import MetricDefinition, EventDefinition, MetricType, EventSeriesType
@click.command()
@click.option('--force', is_flag=True)
@click.pass_context
def initdb(ctx, force):
"""Initialize the Database."""
db = ctx.obj["db"]
config = ctx.obj["config"]
assert db.init == False
if force:
click.confirm('Warning: This will init the database even if it already existed.', abort=True)
# check for events and metrics
if hasattr(config, "METRICS"):
metr = config.METRICS
click.echo("Loading {} metrics definitions".format(len(metr)))
db.add_metric_definitions(metr)
if hasattr(config, "EVENTS"):
ev = config.EVENTS
click.echo("Loading {} event definitions".format(len(ev)))
db.add_event_definitions(ev)
click.echo('Initializing database ...')
db.database_init(silent=force)
click.echo("Finished")
# @click.command()
# @click.option('--force', is_flag=True)
# @click.pass_context
# def create_metrics(ctx, force):
# """Create all metric columns."""
# db = ctx.obj["db"]
# config = ctx.obj["config"]
# if force:
# click.confirm('This will create all metrics even if they existed before', abort=True)
# db.service_init()
# assert db.init == True
# click.echo('Creating all metrics ...')
# db.create_all_metrics(silent=force)
# click.echo("Finished")
@click.command()
@click.pass_context
def dbinfo(ctx):
"""Show information of the selected Database."""
db = ctx.obj["db"]
config = ctx.obj["config"]
db.service_init()
assert db.init == True
click.echo('Reading database structure ...')
tables = db.read_database_structure()
for t in tables:
click.echo("# TABLE: {} ({})".format(t["name"], t["full_name"]))
for cf in t["column_families"]:
click.echo("- {}".format(cf))
click.echo("# EVENTS")
for e in db.event_definitions:
click.echo(e)
click.echo("# METRICS")
for m in db.metric_definitions:
click.echo(m)
click.echo("# ALL GOOD!")
@click.command()
@click.option('--metricid', prompt='Short metric identifier (2-6 chars)', type=str)
@click.option('--metricname', prompt='Metric name', type=str)
@click.option('--metrictype', prompt='Metric type', default="float",
type=click.Choice(['float', 'dict'], case_sensitive=False))
@click.option('--delete/--nodelete', prompt='Allow delete operations', default=True,
is_flag=True)
@click.pass_context
def newmetric(ctx, metricid, metricname, metrictype, delete):
"""Create a new metric for timeseries storage."""
db = ctx.obj["db"]
db.service_init()
assert db.init == True
click.echo('Creating a new metric definition ...')
t = MetricType.DICTSERIES if metrictype == "dict" else MetricType.FLOATSERIES
m = MetricDefinition(metricname, metricid, t, delete)
db.new_metric_definition(m)
click.echo('Created metric definition: {}'.format(metricname))
@click.command()
@click.option('--eventname', prompt='Event name', type=str)
@click.option('--eventtype', prompt='Event type', default="daily",
type=click.Choice(['daily', 'monthly'], case_sensitive=False))
@click.pass_context
def newevent(ctx, eventname, eventtype):
"""Create a new event definition for the event storage."""
db = ctx.obj["db"]
db.service_init()
assert db.init == True
click.echo('Creating a new event definition ...')
t = EventSeriesType.MONTHLY if eventtype == "monthly" else EventSeriesType.DAILY
e = EventDefinition(eventname, t)
db.new_event_definition(e)
click.echo('Created event definition: {}'.format(eventname))
@click.command()
@click.option('--port', type=int, default=5000)
@click.option('--debug/--nodebug', is_flag=True, default=True)
@click.pass_context
def runserver(ctx, port, debug):
"""Run Rest Server (test server)."""
from ..restserver import _create_app
config = ctx.obj["config"]
app = _create_app(config)
click.echo("Starting development REST server. DO NOT USE IN PRODUCTION!")
app.run(host='0.0.0.0', port=port, debug=debug, threaded=False)
@click.command()
@click.argument("key")
@click.pass_context
def download_timeseries(ctx, key):
"""Download data from the timeseries storage."""
db = ctx.obj["db"]
db.service_init()
assert db.init == True
t1 = time.time()
client = ctx.obj["client"]
res = client.get_full_timeseries(key)
file_name = os.path.realpath("test.csv")
with open(file_name, "w") as fp:
res.to_csv(fp)
fs = os.path.getsize(file_name)
fs = fs / 1024
click.echo("Download finished. {:.2f} kb in {:.2f} seconds".format(fs, time.time()-t1))
| 33.629371 | 101 | 0.659805 |
7944f07ec430de7a0824a7a5274e39f4ef37f28f | 5,236 | py | Python | app/engine/chapter_title.py | ViolaBuddy/EscapeFromPlegia | 5228b42e8525b445854d742dccf85ca65b320d70 | [
"MIT"
] | null | null | null | app/engine/chapter_title.py | ViolaBuddy/EscapeFromPlegia | 5228b42e8525b445854d742dccf85ca65b320d70 | [
"MIT"
] | null | null | null | app/engine/chapter_title.py | ViolaBuddy/EscapeFromPlegia | 5228b42e8525b445854d742dccf85ca65b320d70 | [
"MIT"
] | null | null | null | from app.constants import WINWIDTH, WINHEIGHT
from app.engine.sprites import SPRITES
from app.engine.sound import SOUNDTHREAD
from app.engine.fonts import FONT
from app.engine.state import State
from app.engine import background, image_mods, engine
from app.engine.game_state import game
class ChapterTitleState(State):
name = 'chapter_title'
sigil1 = SPRITES.get('chapter_title_sigil').convert_alpha()
sigil2 = SPRITES.get('chapter_title_sigil2').convert_alpha()
def start(self):
"""
Displays the chapter title screen
"""
self.state = 'begin'
self.wait_time = engine.get_time()
self.bg = background.TransitionBackground(SPRITES.get('chapter_title_bg'), fade=False)
self.ribbon = SPRITES.get('chapter_title_ribbon')
self.sigil_fade = 0
self.banner_grow_y = 6
self.banner_grow_x = 0
self.banner_fade = 0
# Fade in music
self.music_flag = False
if game.memory.get('chapter_title_sound'):
song = SOUNDTHREAD.fade_in(game.memory.get('chapter_sound'))
if song:
self.music_flag = True
if not self.music_flag:
song = SOUNDTHREAD.fade_in('Chapter Sound')
if song:
self.music_flag = True
self.title = game.memory.get('chapter_title_title')
if not self.title:
self.title = game.level.name
game.state.change('transition_in')
return 'repeat'
def take_input(self, event):
if event in ('START', 'SELECT', 'BACK'):
if self.music_flag:
SOUNDTHREAD.stop()
game.state.change('transition_pop')
self.state = 'end'
return 'repeat'
def update(self):
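        # Simple state machine driving the intro animation:
        # begin -> sigil -> banner_grow_x -> banner_grow_y -> ribbon_fade_in
        # -> wait -> sigil_out -> ribbon_close -> end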
current_time = engine.get_time()
if self.state == 'begin':
if current_time - self.wait_time > 50 * 16:
self.state = 'sigil'
self.wait_time = current_time
elif self.state == 'sigil':
self.sigil_fade = (current_time - self.wait_time) / 1100
if self.sigil_fade >= 1:
self.sigil_fade = 1
self.state = 'banner_grow_x'
elif self.state == 'banner_grow_x':
self.banner_grow_x += 10
if self.banner_grow_x >= WINWIDTH:
self.banner_grow_x = WINWIDTH
self.state = 'banner_grow_y'
elif self.state == 'banner_grow_y':
self.banner_grow_y += 2
if self.banner_grow_y >= self.ribbon.get_height():
self.banner_grow_y = self.ribbon.get_height()
self.state = 'ribbon_fade_in'
elif self.state == 'ribbon_fade_in':
self.banner_fade += 0.02
if self.banner_fade >= 1:
self.banner_fade = 1
self.wait_time = current_time
self.state = 'wait'
elif self.state == 'wait':
if current_time - self.wait_time > 5000:
self.state = 'sigil_out'
elif self.state == 'sigil_out':
self.sigil_fade -= .02
if self.sigil_fade <= 0:
self.sigil_fade = 0
self.state = 'ribbon_close'
if self.music_flag:
SOUNDTHREAD.fade_to_stop()
elif self.state == 'ribbon_close':
self.banner_grow_y -= 2
if self.banner_grow_y <= 0:
self.banner_grow_y = 0
self.state = 'end'
game.state.change('transition_pop')
elif self.state == 'end':
pass
def draw(self, surf):
engine.fill(surf, (0, 0, 0))
self.bg.update()
self.bg.draw(surf)
# # Draw sigil
sigil_outline = image_mods.make_translucent(self.sigil1, 1 - self.sigil_fade)
sigil_middle = image_mods.make_translucent(self.sigil2, 1 - self.sigil_fade)
center_x = WINWIDTH//2 - sigil_outline.get_width()//2
center_y = WINHEIGHT//2 - sigil_outline.get_height()//2
surf.blit(sigil_outline, (center_x + 1, center_y + 1))
surf.blit(sigil_middle, (center_x, center_y))
# # Draw ribbon
if self.state in ('ribbon_fade_in', 'wait', 'ribbon_close', 'sigil_out', 'end'):
new_ribbon = engine.copy_surface(self.ribbon)
name_width = FONT['chapter-yellow'].width(self.title)
position = (WINWIDTH//2 - name_width//2, self.ribbon.get_height()//2 - 6)
FONT['chapter-yellow'].blit(self.title, new_ribbon, position)
rect = (0, (self.ribbon.get_height() - self.banner_grow_y)//2, self.ribbon.get_width(), self.banner_grow_y)
new_ribbon = engine.subsurface(new_ribbon, rect)
engine.blit_center(surf, new_ribbon)
# # Draw banner
banner = image_mods.make_translucent(SPRITES.get('chapter_title_banner'), self.banner_fade)
banner = engine.subsurface(banner, (0, 0, self.banner_grow_x, self.banner_grow_y))
engine.blit_center(surf, banner)
return surf
| 36.873239 | 120 | 0.571238 |
7944f0cf6c5464d32375d477d62e3d8b0656b321 | 1,503 | py | Python | .history/my_classes/FirstClassFunctions/MapFilterZipList_20210706152516.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/FirstClassFunctions/MapFilterZipList_20210706152516.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/FirstClassFunctions/MapFilterZipList_20210706152516.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | """Map Filter Zip List Comprehensions
Higher order functions
A function that takes a function as a parameter and/or returns a function as it's return value
Example: sorted
map _
|
-- modern alternative -> list comprehensions and generator expressions
|
filter -
The map function
map(func, *iterables)
    *iterables -> a variable number of iterable objects
    func -> some function that takes as many arguments as there are iterable objects passed to iterables
map(func, *iterables) will then return an iterator that calculates the function applied to each element of the iterables
The iterator stops as soon as one of the iterables has been exhausted, so, unequal length iterables can be used
Examples
"""
l = [2, 3, 4]
def sq(x):
return x**2
list(map(sq, l)) # [4, 9, 16]
l1 = [1, 2, 3]
l2 = [10, 20, 30]
def add(x, y):
return x + y
list(map(add, l1, l2)) # [11, 22, 33]
"""The filter function
filter(func, iterable)
iterable -> a single iterable
func -> some function that takes a single argument
filter(func, iterable) will then return an iterator that contains all the elements of the iterable for which the function called on it is Truthy
If the function is None, it simply returns the elements of iterable that are Truthy
Examples
"""
l = [0, 1, 2, 3, 4] # 0 is Falsey, all the other numbers are Truthy
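# A minimal sketch of the filter behaviour described above:
list(filter(None, l))  # [1, 2, 3, 4] -> keeps only the Truthy elements
list(filter(lambda x: x % 2 == 0, l))  # [0, 2, 4]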
| 22.102941 | 144 | 0.646041 |
7944f1c995beda066665fa256684d778edfd489a | 17,922 | py | Python | 3rdparty/experimental/gui/freetype-2.5.3/src/tools/docmaker/tohtml.py | ruoranluomu/AliOS-Things | d0f3431bcacac5b61645e9beb231a0a53be8078b | [
"Apache-2.0"
] | 4,538 | 2017-10-20T05:19:03.000Z | 2022-03-30T02:29:30.000Z | 3rdparty/experimental/gui/freetype-2.5.3/src/tools/docmaker/tohtml.py | ruoranluomu/AliOS-Things | d0f3431bcacac5b61645e9beb231a0a53be8078b | [
"Apache-2.0"
] | 1,088 | 2017-10-21T07:57:22.000Z | 2022-03-31T08:15:49.000Z | 3rdparty/experimental/gui/freetype-2.5.3/src/tools/docmaker/tohtml.py | ruoranluomu/AliOS-Things | d0f3431bcacac5b61645e9beb231a0a53be8078b | [
"Apache-2.0"
] | 1,860 | 2017-10-20T05:22:35.000Z | 2022-03-27T10:54:14.000Z | # ToHTML (c) 2002, 2003, 2005-2008, 2013
# David Turner <[email protected]>
from sources import *
from content import *
from formatter import *
import time
# The following defines the HTML header used by all generated pages.
html_header_1 = """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>\
"""
html_header_2 = """\
API Reference</title>
<style type="text/css">
body { font-family: Verdana, Geneva, Arial, Helvetica, serif;
color: #000000;
background: #FFFFFF; }
p { text-align: justify; }
h1 { text-align: center; }
li { text-align: justify; }
td { padding: 0 0.5em 0 0.5em; }
td.left { padding: 0 0.5em 0 0.5em;
text-align: left; }
a:link { color: #0000EF; }
a:visited { color: #51188E; }
a:hover { color: #FF0000; }
span.keyword { font-family: monospace;
text-align: left;
white-space: pre;
color: darkblue; }
pre.colored { color: blue; }
ul.empty { list-style-type: none; }
</style>
</head>
<body>
"""
html_header_3 = """
<table align=center><tr><td><font size=-1>[<a href="\
"""
html_header_3i = """
<table align=center><tr><td width="100%"></td>
<td><font size=-1>[<a href="\
"""
html_header_4 = """\
">Index</a>]</font></td>
<td width="100%"></td>
<td><font size=-1>[<a href="\
"""
html_header_5 = """\
">TOC</a>]</font></td></tr></table>
<center><h1>\
"""
html_header_5t = """\
">Index</a>]</font></td>
<td width="100%"></td></tr></table>
<center><h1>\
"""
html_header_6 = """\
API Reference</h1></center>
"""
# The HTML footer used by all generated pages.
html_footer = """\
</body>
</html>\
"""
# The header and footer used for each section.
section_title_header = "<center><h1>"
section_title_footer = "</h1></center>"
# The header and footer used for code segments.
code_header = '<pre class="colored">'
code_footer = '</pre>'
# Paragraph header and footer.
para_header = "<p>"
para_footer = "</p>"
# Block header and footer.
block_header = '<table align=center width="75%"><tr><td>'
block_footer_start = """\
</td></tr></table>
<hr width="75%">
<table align=center width="75%"><tr><td><font size=-2>[<a href="\
"""
block_footer_middle = """\
">Index</a>]</font></td>
<td width="100%"></td>
<td><font size=-2>[<a href="\
"""
block_footer_end = """\
">TOC</a>]</font></td></tr></table>
"""
# Description header/footer.
description_header = '<table align=center width="87%"><tr><td>'
description_footer = "</td></tr></table><br>"
# Marker header/inter/footer combination.
marker_header = '<table align=center width="87%" cellpadding=5><tr bgcolor="#EEEEFF"><td><em><b>'
marker_inter = "</b></em></td></tr><tr><td>"
marker_footer = "</td></tr></table>"
# Header location header/footer.
header_location_header = '<table align=center width="87%"><tr><td>'
header_location_footer = "</td></tr></table><br>"
# Source code extracts header/footer.
source_header = '<table align=center width="87%"><tr bgcolor="#D6E8FF"><td><pre>\n'
source_footer = "\n</pre></table><br>"
# Chapter header/inter/footer.
chapter_header = '<br><table align=center width="75%"><tr><td><h2>'
chapter_inter = '</h2><ul class="empty"><li>'
chapter_footer = '</li></ul></td></tr></table>'
# Index footer.
index_footer_start = """\
<hr>
<table><tr><td width="100%"></td>
<td><font size=-2>[<a href="\
"""
index_footer_end = """\
">TOC</a>]</font></td></tr></table>
"""
# TOC footer.
toc_footer_start = """\
<hr>
<table><tr><td><font size=-2>[<a href="\
"""
toc_footer_end = """\
">Index</a>]</font></td>
<td width="100%"></td>
</tr></table>
"""
# source language keyword coloration/styling
keyword_prefix = '<span class="keyword">'
keyword_suffix = '</span>'
section_synopsis_header = '<h2>Synopsis</h2>'
section_synopsis_footer = ''
# Translate a single line of source to HTML. This will convert
# a "<" into "<.", ">" into ">.", etc.
def html_quote( line ):
result = string.replace( line, "&", "&" )
result = string.replace( result, "<", "<" )
result = string.replace( result, ">", ">" )
return result
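# For example, html_quote( 'a < b & c' ) returns 'a &lt; b &amp; c'.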
class HtmlFormatter( Formatter ):
def __init__( self, processor, project_title, file_prefix ):
Formatter.__init__( self, processor )
global html_header_1, html_header_2, html_header_3
global html_header_4, html_header_5, html_footer
if file_prefix:
file_prefix = file_prefix + "-"
else:
file_prefix = ""
self.headers = processor.headers
self.project_title = project_title
self.file_prefix = file_prefix
self.html_header = html_header_1 + project_title + \
html_header_2 + \
html_header_3 + file_prefix + "index.html" + \
html_header_4 + file_prefix + "toc.html" + \
html_header_5 + project_title + \
html_header_6
self.html_index_header = html_header_1 + project_title + \
html_header_2 + \
html_header_3i + file_prefix + "toc.html" + \
html_header_5 + project_title + \
html_header_6
self.html_toc_header = html_header_1 + project_title + \
html_header_2 + \
html_header_3 + file_prefix + "index.html" + \
html_header_5t + project_title + \
html_header_6
self.html_footer = "<center><font size=""-2"">generated on " + \
time.asctime( time.localtime( time.time() ) ) + \
"</font></center>" + html_footer
self.columns = 3
def make_section_url( self, section ):
return self.file_prefix + section.name + ".html"
def make_block_url( self, block ):
return self.make_section_url( block.section ) + "#" + block.name
def make_html_word( self, word ):
"""analyze a simple word to detect cross-references and styling"""
# look for cross-references
m = re_crossref.match( word )
if m:
try:
name = m.group( 1 )
rest = m.group( 2 )
block = self.identifiers[name]
url = self.make_block_url( block )
return '<a href="' + url + '">' + name + '</a>' + rest
except:
# we detected a cross-reference to an unknown item
sys.stderr.write( \
"WARNING: undefined cross reference '" + name + "'.\n" )
return '?' + name + '?' + rest
# look for italics and bolds
m = re_italic.match( word )
if m:
name = m.group( 1 )
rest = m.group( 3 )
return '<i>' + name + '</i>' + rest
m = re_bold.match( word )
if m:
name = m.group( 1 )
rest = m.group( 3 )
return '<b>' + name + '</b>' + rest
return html_quote( word )
def make_html_para( self, words ):
""" convert words of a paragraph into tagged HTML text, handle xrefs """
line = ""
if words:
line = self.make_html_word( words[0] )
for word in words[1:]:
line = line + " " + self.make_html_word( word )
# handle hyperlinks
line = re_url.sub( r'<a href="\1">\1</a>', line )
# convert `...' quotations into real left and right single quotes
line = re.sub( r"(^|\W)`(.*?)'(\W|$)", \
                       r'\1&lsquo;\2&rsquo;\3', \
line )
# convert tilde into non-breakable space
line = string.replace( line, "~", " " )
return para_header + line + para_footer
def make_html_code( self, lines ):
""" convert a code sequence to HTML """
line = code_header + '\n'
for l in lines:
line = line + html_quote( l ) + '\n'
return line + code_footer
def make_html_items( self, items ):
""" convert a field's content into some valid HTML """
lines = []
for item in items:
if item.lines:
lines.append( self.make_html_code( item.lines ) )
else:
lines.append( self.make_html_para( item.words ) )
return string.join( lines, '\n' )
def print_html_items( self, items ):
print self.make_html_items( items )
def print_html_field( self, field ):
if field.name:
print "<table><tr valign=top><td><b>" + field.name + "</b></td><td>"
print self.make_html_items( field.items )
if field.name:
print "</td></tr></table>"
def html_source_quote( self, line, block_name = None ):
result = ""
while line:
m = re_source_crossref.match( line )
if m:
name = m.group( 2 )
prefix = html_quote( m.group( 1 ) )
length = len( m.group( 0 ) )
if name == block_name:
# this is the current block name, if any
result = result + prefix + '<b>' + name + '</b>'
elif re_source_keywords.match( name ):
# this is a C keyword
result = result + prefix + keyword_prefix + name + keyword_suffix
elif self.identifiers.has_key( name ):
# this is a known identifier
block = self.identifiers[name]
result = result + prefix + '<a href="' + \
self.make_block_url( block ) + '">' + name + '</a>'
else:
result = result + html_quote( line[:length] )
line = line[length:]
else:
result = result + html_quote( line )
line = []
return result
def print_html_field_list( self, fields ):
print "<p></p>"
print "<table cellpadding=3 border=0>"
for field in fields:
if len( field.name ) > 22:
print "<tr valign=top><td colspan=0><b>" + field.name + "</b></td></tr>"
print "<tr valign=top><td></td><td>"
else:
print "<tr valign=top><td><b>" + field.name + "</b></td><td>"
self.print_html_items( field.items )
print "</td></tr>"
print "</table>"
def print_html_markup( self, markup ):
table_fields = []
for field in markup.fields:
if field.name:
# we begin a new series of field or value definitions, we
# will record them in the 'table_fields' list before outputting
# all of them as a single table
#
table_fields.append( field )
else:
if table_fields:
self.print_html_field_list( table_fields )
table_fields = []
self.print_html_items( field.items )
if table_fields:
self.print_html_field_list( table_fields )
#
# Formatting the index
#
def index_enter( self ):
print self.html_index_header
self.index_items = {}
def index_name_enter( self, name ):
block = self.identifiers[name]
url = self.make_block_url( block )
self.index_items[name] = url
def index_exit( self ):
# block_index already contains the sorted list of index names
count = len( self.block_index )
rows = ( count + self.columns - 1 ) / self.columns
print "<table align=center border=0 cellpadding=0 cellspacing=0>"
for r in range( rows ):
line = "<tr>"
for c in range( self.columns ):
i = r + c * rows
if i < count:
bname = self.block_index[r + c * rows]
url = self.index_items[bname]
line = line + '<td><a href="' + url + '">' + bname + '</a></td>'
else:
line = line + '<td></td>'
line = line + "</tr>"
print line
print "</table>"
print index_footer_start + \
self.file_prefix + "toc.html" + \
index_footer_end
print self.html_footer
self.index_items = {}
def index_dump( self, index_filename = None ):
if index_filename == None:
index_filename = self.file_prefix + "index.html"
Formatter.index_dump( self, index_filename )
#
# Formatting the table of content
#
def toc_enter( self ):
print self.html_toc_header
print "<center><h1>Table of Contents</h1></center>"
def toc_chapter_enter( self, chapter ):
print chapter_header + string.join( chapter.title ) + chapter_inter
print "<table cellpadding=5>"
def toc_section_enter( self, section ):
print '<tr valign=top><td class="left">'
print '<a href="' + self.make_section_url( section ) + '">' + \
section.title + '</a></td><td>'
print self.make_html_para( section.abstract )
def toc_section_exit( self, section ):
print "</td></tr>"
def toc_chapter_exit( self, chapter ):
print "</table>"
print chapter_footer
def toc_index( self, index_filename ):
print chapter_header + \
'<a href="' + index_filename + '">Global Index</a>' + \
chapter_inter + chapter_footer
def toc_exit( self ):
print toc_footer_start + \
self.file_prefix + "index.html" + \
toc_footer_end
print self.html_footer
def toc_dump( self, toc_filename = None, index_filename = None ):
if toc_filename == None:
toc_filename = self.file_prefix + "toc.html"
if index_filename == None:
index_filename = self.file_prefix + "index.html"
Formatter.toc_dump( self, toc_filename, index_filename )
#
# Formatting sections
#
def section_enter( self, section ):
print self.html_header
print section_title_header
print section.title
print section_title_footer
maxwidth = 0
for b in section.blocks.values():
if len( b.name ) > maxwidth:
maxwidth = len( b.name )
width = 70 # XXX magic number
if maxwidth <> 0:
# print section synopsis
print section_synopsis_header
print "<table align=center cellspacing=5 cellpadding=0 border=0>"
columns = width / maxwidth
if columns < 1:
columns = 1
count = len( section.block_names )
rows = ( count + columns - 1 ) / columns
for r in range( rows ):
line = "<tr>"
for c in range( columns ):
i = r + c * rows
line = line + '<td></td><td>'
if i < count:
name = section.block_names[i]
line = line + '<a href="#' + name + '">' + name + '</a>'
line = line + '</td>'
line = line + "</tr>"
print line
print "</table><br><br>"
print section_synopsis_footer
print description_header
print self.make_html_items( section.description )
print description_footer
def block_enter( self, block ):
print block_header
# place html anchor if needed
if block.name:
print '<h4><a name="' + block.name + '">' + block.name + '</a></h4>'
# dump the block C source lines now
if block.code:
header = ''
for f in self.headers.keys():
if block.source.filename.find( f ) >= 0:
header = self.headers[f] + ' (' + f + ')'
break;
# if not header:
# sys.stderr.write( \
# 'WARNING: No header macro for ' + block.source.filename + '.\n' )
if header:
print header_location_header
print 'Defined in ' + header + '.'
print header_location_footer
print source_header
for l in block.code:
print self.html_source_quote( l, block.name )
print source_footer
def markup_enter( self, markup, block ):
if markup.tag == "description":
print description_header
else:
print marker_header + markup.tag + marker_inter
self.print_html_markup( markup )
def markup_exit( self, markup, block ):
if markup.tag == "description":
print description_footer
else:
print marker_footer
def block_exit( self, block ):
print block_footer_start + self.file_prefix + "index.html" + \
block_footer_middle + self.file_prefix + "toc.html" + \
block_footer_end
def section_exit( self, section ):
print html_footer
def section_dump_all( self ):
for section in self.sections:
self.section_dump( section, self.file_prefix + section.name + '.html' )
# eof
| 31.608466 | 97 | 0.524216 |
7944f327e3645e498e890d75d5e3aa197f2fba82 | 4,523 | py | Python | scikit-learn-weighted_kde/examples/decomposition/plot_pca_vs_fa_model_selection.py | RTHMaK/git-squash-master | 76c4c8437dd18114968e69a698f4581927fcdabf | [
"BSD-2-Clause"
] | 1 | 2021-11-26T12:22:13.000Z | 2021-11-26T12:22:13.000Z | scikit-learn-weighted_kde/examples/decomposition/plot_pca_vs_fa_model_selection.py | RTHMaK/git-squash-master | 76c4c8437dd18114968e69a698f4581927fcdabf | [
"BSD-2-Clause"
] | null | null | null | scikit-learn-weighted_kde/examples/decomposition/plot_pca_vs_fa_model_selection.py | RTHMaK/git-squash-master | 76c4c8437dd18114968e69a698f4581927fcdabf | [
"BSD-2-Clause"
] | null | null | null | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
print(__doc__)
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA(svd_solver='full')
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
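# Note: cross_val_score relies on the default score() of PCA and FactorAnalysis,
# i.e. the average log-likelihood of the held-out data, so these CV scores are
# directly comparable with the covariance-estimator likelihoods computed below.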
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(svd_solver='full', n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa,
linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| 35.896825 | 79 | 0.670573 |
7944f439eb9b2e729918cf54e37781a4078ec0f2 | 7,899 | py | Python | tensorflow/python/tpu/datasets.py | ashutom/tensorflow-upstream | c16069c19de9e286dd664abb78d0ea421e9f32d4 | [
"Apache-2.0"
] | 10 | 2021-05-25T17:43:04.000Z | 2022-03-08T10:46:09.000Z | tensorflow/python/tpu/datasets.py | CaptainGizzy21/tensorflow | 3457a2b122e50b4d44ceaaed5a663d635e5c22df | [
"Apache-2.0"
] | 1,056 | 2019-12-15T01:20:31.000Z | 2022-02-10T02:06:28.000Z | tensorflow/python/tpu/datasets.py | CaptainGizzy21/tensorflow | 3457a2b122e50b4d44ceaaed5a663d635e5c22df | [
"Apache-2.0"
] | 6 | 2016-09-07T04:00:15.000Z | 2022-01-12T01:47:38.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Library of Cloud TPU helper functions for data loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Callable, Optional, Text, Union
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import functional_ops
def _TextLineDataset(filename: Text) -> dataset_ops.Dataset:
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = readers.TextLineDataset(filename, buffer_size=buffer_size)
return dataset
def _TFRecordDataset(filename: Text) -> dataset_ops.Dataset:
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = readers.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
_FILETYPE_MAP = {
'tfrecord': _TFRecordDataset,
'textline': _TextLineDataset,
'text': _TextLineDataset,
}
def StreamingFilesDataset(
files: Union[Text, dataset_ops.Dataset],
filetype: Optional[Union[Text, Callable[[Text],
dataset_ops.Dataset]]] = None,
file_reader_job: Optional[Text] = None,
worker_job: Optional[Text] = None,
num_epochs: Optional[int] = None,
filename_shuffle_buffer_size: Optional[Union[int, bool]] = None,
num_parallel_reads: Optional[int] = None,
batch_transfer_size: Optional[Union[int, bool]] = None,
sloppy: bool = True) -> dataset_ops.Dataset:
"""StreamingFilesDataset constructs a dataset to stream from workers (GCE VM).
Because Cloud TPUs are allocated over the network, a Cloud TPU cannot read
files local to your GCE VM. In order to train using files stored on your local
VM (e.g. on local SSD for extreme performance), use the StreamingFilesDataset
helper to generate a dataset to feed your Cloud TPU with files from your GCE
VM.
The resulting dataset may return an OutOfRangeError if there are no files
found as a result of the fileglob expansion.
Note: StreamingFilesDataset assumes that the session is using a
  TPUClusterResolver and therefore has a worker and a coordinator job. File
loading will be done on the coordinator job.
Args:
files: A string glob to match files, or a `tf.data.Dataset` generating file
names.
filetype: A string (one of 'tfrecord', or 'textline') or a single-argument
TensorFlow function that when given a filename returns a dataset.
file_reader_job: An optional string that corresponds to the job that should
perform the file reads.
worker_job: An optional string that corresponds to the job that should
process the tensors (i.e. your GPU or TPU worker).
num_epochs: The number of epochs through the training set that should be
generated. By default, it will repeat infinitely.
filename_shuffle_buffer_size: An optional integer whose value controls the
shuffling of the file names. If you would like to read from the files in
the same order, set to 0 or False.
num_parallel_reads: An optional integer controlling the number of files to
read from concurrently. (Set to 1 for no parallelism.)
batch_transfer_size: An optional integer controlling the batching used to
amortize the remote function invocation overhead. Set to a very large
number to increase throughput. Set to a very small number to reduce memory
consumption. Set to False to skip batching.
sloppy: (Optional.) If `False`, read input data while maintaining a
deterministic order. (This may have significant performance impacts.)
sloppy defaults to: True.
Returns:
A `tf.data.Dataset` with an infinite stream of elements generated by a
parallel interleaving of the set of files matched (or generated) by `files`
with a type is the output of the dataset specified by `filetype`.
Raises:
ValueError: if any argument is not of the expected type.
"""
if filetype is None:
filetype = 'tfrecord'
if isinstance(filetype, str):
if filetype not in _FILETYPE_MAP:
raise ValueError('Unexpected filetype: %s' % filetype)
reader_fn = _FILETYPE_MAP[filetype]
elif callable(filetype):
reader_fn = filetype
else:
raise ValueError('filetype should be a string or a callable')
file_reader_job = file_reader_job or 'coordinator'
worker_job = worker_job or 'worker'
if filename_shuffle_buffer_size is None:
filename_shuffle_buffer_size = 4096
num_parallel_reads = num_parallel_reads or 8
if batch_transfer_size is None:
batch_transfer_size = 256
if file_reader_job == 'coordinator':
file_reader_device = '/job:coordinator/task:0'
else:
file_reader_device = '/job:%s' % file_reader_job
with ops.device(file_reader_device):
if isinstance(files, str):
source_dataset = dataset_ops.Dataset.list_files(files)
elif isinstance(files, dataset_ops.DatasetV2):
source_dataset = files
else:
raise ValueError('files was not a string or a dataset: %s' % files)
if filename_shuffle_buffer_size:
source_dataset = source_dataset.shuffle(
buffer_size=filename_shuffle_buffer_size)
source_dataset = source_dataset.apply(
interleave_ops.parallel_interleave(
reader_fn, cycle_length=num_parallel_reads, sloppy=sloppy))
source_dataset = source_dataset.repeat(num_epochs)
if batch_transfer_size:
source_dataset = source_dataset.batch(batch_transfer_size)
source_dataset = source_dataset.prefetch(1)
source_iterator = dataset_ops.make_one_shot_iterator(source_dataset)
source_handle = source_iterator.string_handle()
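  # The worker-side dataset built below pulls elements by invoking LoadingFunc
  # remotely on the file-reader job; this string handle is how the remote call
  # locates the coordinator-side iterator.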
@function.Defun(dtypes.string)
def LoadingFunc(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, dataset_ops.get_legacy_output_types(source_dataset),
dataset_ops.get_legacy_output_shapes(source_dataset))
return remote_iterator.get_next()
def MapFn(unused_input):
source_dataset_output_types = dataset_ops.get_legacy_output_types(
source_dataset)
if isinstance(source_dataset_output_types, dtypes.DType):
output_types = [source_dataset_output_types]
elif isinstance(source_dataset_output_types, (list, tuple)):
output_types = source_dataset_output_types
else:
raise ValueError('source dataset has invalid output types')
remote_calls = functional_ops.remote_call(
args=[source_handle],
Tout=output_types,
f=LoadingFunc,
target='/job:%s/replica:0/task:0/cpu:0' % file_reader_job)
if len(remote_calls) == 1:
return remote_calls[0]
else:
return remote_calls
with ops.device('/job:%s' % worker_job):
output_dataset = dataset_ops.Dataset.range(2).repeat().map(
MapFn, num_parallel_calls=4 if sloppy else None)
output_dataset = output_dataset.prefetch(1)
if batch_transfer_size:
# Undo the batching used during the transfer.
output_dataset = output_dataset.unbatch().prefetch(1)
return output_dataset
| 39.495 | 80 | 0.741233 |
7944f544b9638ae5c76bcac11a42938441f4054b | 427 | py | Python | kivy/tests/__init__.py | Galland/kivy | 95a6bf279883d706f645e4629c16d5ee1038f0ec | [
"MIT"
] | 13,889 | 2015-01-01T06:43:41.000Z | 2022-03-31T17:37:56.000Z | kivy/tests/__init__.py | Galland/kivy | 95a6bf279883d706f645e4629c16d5ee1038f0ec | [
"MIT"
] | 4,570 | 2015-01-01T17:58:52.000Z | 2022-03-31T18:42:16.000Z | kivy/tests/__init__.py | Galland/kivy | 95a6bf279883d706f645e4629c16d5ee1038f0ec | [
"MIT"
] | 3,786 | 2015-01-01T09:20:45.000Z | 2022-03-30T21:15:05.000Z | from kivy.tests.common import GraphicUnitTest, UnitTestTouch, UTMotionEvent, \
async_run
try:
from kivy.tests.async_common import UnitKivyApp
except SyntaxError:
# async app tests would be skipped due to async_run forcing it to skip so
# it's ok to be None as it won't be used anyway
UnitKivyApp = None
__all__ = ('GraphicUnitTest', 'UnitTestTouch', 'UTMotionEvent', 'async_run',
'UnitKivyApp')
| 35.583333 | 78 | 0.728337 |
7944f5931328d240cdb715c84c554b120b3e3ab3 | 737 | py | Python | pitop/processing/algorithms/faces/core/emotion.py | pi-top/pi-top-Python-SDK | 6c83cc5f612d77f86f8d391c7f2924a28f7b1232 | [
"Apache-2.0"
] | 28 | 2020-11-24T08:02:58.000Z | 2022-02-27T18:37:33.000Z | pitop/processing/algorithms/faces/core/emotion.py | pi-top/pi-top-Python-SDK | 6c83cc5f612d77f86f8d391c7f2924a28f7b1232 | [
"Apache-2.0"
] | 263 | 2020-11-10T14:35:10.000Z | 2022-03-31T12:35:13.000Z | pitop/processing/algorithms/faces/core/emotion.py | pi-top/pi-top-Python-SDK | 6c83cc5f612d77f86f8d391c7f2924a28f7b1232 | [
"Apache-2.0"
] | 1 | 2022-01-31T22:48:35.000Z | 2022-01-31T22:48:35.000Z | class Emotion:
def __init__(self):
self._type = None
self._confidence = 0.0
self._robot_view = None
def clear(self):
self.type = None
self.confidence = 0.0
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
@property
def confidence(self):
return self._confidence
@confidence.setter
def confidence(self, value):
self._confidence = value
@property
def robot_view(self):
return self._robot_view
@robot_view.setter
def robot_view(self, value):
self._robot_view = value
@property
def found(self):
return self.type is not None
| 19.394737 | 36 | 0.598372 |
7944f5f11c31c1b81c4fb28472df394d7ef3bdb9 | 5,647 | py | Python | esda/join_counts_local.py | jeffcsauer/esda | 5a7e4ff67eb18bfc0a529fbac9d5a4aa1d90b2c0 | [
"BSD-3-Clause"
] | 145 | 2017-04-06T06:28:08.000Z | 2022-03-26T18:31:29.000Z | esda/join_counts_local.py | jeffcsauer/esda | 5a7e4ff67eb18bfc0a529fbac9d5a4aa1d90b2c0 | [
"BSD-3-Clause"
] | 150 | 2017-03-13T23:18:56.000Z | 2022-03-07T06:51:54.000Z | esda/join_counts_local.py | jeffcsauer/esda | 5a7e4ff67eb18bfc0a529fbac9d5a4aa1d90b2c0 | [
"BSD-3-Clause"
] | 50 | 2017-03-13T21:19:31.000Z | 2022-03-31T09:30:46.000Z | import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from libpysal import weights
from esda.crand import crand as _crand_plus, njit as _njit, _prepare_univariate
PERMUTATIONS = 999
class Join_Counts_Local(BaseEstimator):
"""Univariate Local Join Count Statistic"""
def __init__(
self,
connectivity=None,
permutations=PERMUTATIONS,
n_jobs=1,
keep_simulations=True,
seed=None,
):
"""
        Initialize a Join_Counts_Local estimator
Arguments
---------
connectivity : scipy.sparse matrix object
the connectivity structure describing
the relationships between observed units.
Need not be row-standardized.
permutations : int
number of random permutations for calculation of pseudo
p_values
n_jobs : int
Number of cores to be used in the conditional randomisation. If -1,
all available cores are used.
keep_simulations : Boolean
(default=True)
If True, the entire matrix of replications under the null
is stored in memory and accessible; otherwise, replications
are not saved
seed : None/int
Seed to ensure reproducibility of conditional randomizations.
Must be set here, and not outside of the function, since numba
does not correctly interpret external seeds
nor numpy.random.RandomState instances.
Attributes
----------
LJC : numpy array
array containing the univariate
Local Join Count (LJC).
p_sim : numpy array
array containing the simulated
p-values for each unit.
"""
self.connectivity = connectivity
self.permutations = permutations
self.n_jobs = n_jobs
self.keep_simulations = keep_simulations
self.seed = seed
def fit(self, y, n_jobs=1, permutations=999):
"""
Arguments
---------
y : numpy.ndarray
array containing binary (0/1) data
Returns
-------
the fitted estimator.
Notes
-----
Technical details and derivations found in :cite:`AnselinLi2019`.
Examples
--------
>>> import libpysal
>>> w = libpysal.weights.lat2W(4, 4)
>>> y = np.ones(16)
>>> y[0:8] = 0
        >>> LJC_uni = Join_Counts_Local(connectivity=w).fit(y)
>>> LJC_uni.LJC
>>> LJC_uni.p_sim
Guerry data replicating GeoDa tutorial
>>> import libpysal
>>> import geopandas as gpd
>>> guerry = libpysal.examples.load_example('Guerry')
>>> guerry_ds = gpd.read_file(guerry.get_path('Guerry.shp'))
>>> guerry_ds['SELECTED'] = 0
>>> guerry_ds.loc[(guerry_ds['Donatns'] > 10997), 'SELECTED'] = 1
>>> w = libpysal.weights.Queen.from_dataframe(guerry_ds)
        >>> LJC_uni = Join_Counts_Local(connectivity=w).fit(guerry_ds['SELECTED'])
>>> LJC_uni.LJC
>>> LJC_uni.p_sim
"""
# Need to ensure that the np.array() are of
# dtype='float' for numba
y = np.array(y, dtype="float")
w = self.connectivity
# Fill the diagonal with 0s
w = weights.util.fill_diagonal(w, val=0)
w.transform = "b"
keep_simulations = self.keep_simulations
n_jobs = self.n_jobs
seed = self.seed
self.y = y
self.n = len(y)
self.w = w
self.LJC = self._statistic(y, w)
if permutations:
self.p_sim, self.rjoins = _crand_plus(
z=self.y,
w=self.w,
observed=self.LJC,
permutations=permutations,
keep=keep_simulations,
n_jobs=n_jobs,
stat_func=_ljc_uni,
)
# Set p-values for those with LJC of 0 to NaN
self.p_sim[self.LJC == 0] = "NaN"
return self
@staticmethod
def _statistic(y, w):
# Create adjacency list. Note that remove_symmetric=False - this is
# different from the esda.Join_Counts() function.
adj_list = w.to_adjlist(remove_symmetric=False)
zseries = pd.Series(y, index=w.id_order)
focal = zseries.loc[adj_list.focal].values
neighbor = zseries.loc[adj_list.neighbor].values
LJC = (focal == 1) & (neighbor == 1)
adj_list_LJC = pd.DataFrame(
adj_list.focal.values, LJC.astype("uint8")
).reset_index()
adj_list_LJC.columns = ["LJC", "ID"]
adj_list_LJC = adj_list_LJC.groupby(by="ID").sum()
LJC = np.array(adj_list_LJC.LJC.values, dtype="float")
return LJC
# --------------------------------------------------------------
# Conditional Randomization Function Implementations
# --------------------------------------------------------------
# Note: scaling not used
@_njit(fastmath=True)
def _ljc_uni(i, z, permuted_ids, weights_i, scaling):
self_weight = weights_i[0]
other_weights = weights_i[1:]
zi, zrand = _prepare_univariate(i, z, permuted_ids, other_weights)
return zi * (zrand @ other_weights)
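# Illustrative end-to-end usage (a sketch mirroring the class docstring above,
# with the estimator name defined in this module):
#
#     import libpysal
#     w = libpysal.weights.lat2W(4, 4)
#     y = np.ones(16)
#     y[0:8] = 0
#     ljc = Join_Counts_Local(connectivity=w).fit(y)
#     ljc.LJC    # local join counts per observation
#     ljc.p_sim  # simulated p-values (NaN where LJC == 0)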
| 34.018072 | 94 | 0.53586 |
7944f614435014360ed6746ff72e6e9623ff500d | 1,316 | py | Python | api/managers.py | MarcelIrawan/django-template | 03944f99979c303ee364b24385aa827a739cadf0 | [
"MIT"
] | 1 | 2020-11-28T12:55:56.000Z | 2020-11-28T12:55:56.000Z | api/managers.py | MarcelIrawan/django-template | 03944f99979c303ee364b24385aa827a739cadf0 | [
"MIT"
] | null | null | null | api/managers.py | MarcelIrawan/django-template | 03944f99979c303ee364b24385aa827a739cadf0 | [
"MIT"
] | null | null | null | from django.contrib.auth.base_user import BaseUserManager
from django.utils.translation import ugettext_lazy as _
class CustomUserManager(BaseUserManager):
"""
Custom user model manager where email is the unique identifier for
authentication instead of username
"""
def create_user(self, email, password, **extra_fields):
"""
create and save User with email and password
"""
if not email:
raise ValueError(_('Email must be set'))
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password, **extra_fields):
"""
create and save SuperUser with email and password
"""
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
extra_fields.setdefault('is_active', True)
if extra_fields.get('is_staff') is not True:
raise ValueError(_('Superuser must have is_staff=True'))
if extra_fields.get('is_superuser') is not True:
raise ValueError(_('Superuser must have is_superuser=True'))
user = self.create_user(email, password, **extra_fields)
return user
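# Illustrative wiring (a sketch; the concrete user model is not part of this
# file): a custom user model would typically attach this manager, e.g.
#
#     class CustomUser(AbstractBaseUser, PermissionsMixin):
#         email = models.EmailField(_('email address'), unique=True)
#         USERNAME_FIELD = 'email'
#         objects = CustomUserManager()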
| 36.555556 | 72 | 0.658055 |
7944f674f2a9cd75f75b4006ba5d740effe9ea59 | 1,449 | py | Python | multiprom/metrics.py | Bogdanp/multiprom | 36f8352cfdfcc461f07573dd9cbcdfcbf2a53246 | [
"BSD-3-Clause"
] | 3 | 2018-12-14T00:56:02.000Z | 2021-07-05T06:07:12.000Z | multiprom/metrics.py | Bogdanp/multiprom | 36f8352cfdfcc461f07573dd9cbcdfcbf2a53246 | [
"BSD-3-Clause"
] | null | null | null | multiprom/metrics.py | Bogdanp/multiprom | 36f8352cfdfcc461f07573dd9cbcdfcbf2a53246 | [
"BSD-3-Clause"
] | 1 | 2021-05-01T06:49:24.000Z | 2021-05-01T06:49:24.000Z | class Metric:
"""
Parameters:
name(str)
description(str)
"""
kind = None
def __init__(self, collector, name, description):
self.collector = collector
self.name = name.encode("utf-8")
self.description = description.encode("utf-8") if description else None
self.register()
def register(self):
"""Register this metric with its collector.
"""
self.collector.send(encode(b"reg", self.kind, self.name, self.description))
class Counter(Metric):
kind = b"counter"
def inc(self, n=1, **labels):
"""Increment this counter by the given amount.
Parameters:
n(int or float): This must be a positive amount.
"""
assert n >= 0, "amounts must be positive"
self.collector.send(encode(b"inc", self.name, str(n), **labels))
def encode(operation, *args, **labels):
"""Encode a message so that it can be sent over the wire.
Parameters:
operation(str)
\*args(tuple[str])
\**labels(dict)
Returns:
bytes
"""
if labels:
args += (",".join(f'{name}="{labels[name]}"' for name in sorted(labels)),)
message = operation
for arg in args:
message += b"\0"
if arg:
message += arg if isinstance(arg, bytes) else arg.encode("utf-8")
message_len = str(len(message)).encode("ascii")
return b"$" + message_len + b"\0" + message
| 25.421053 | 83 | 0.58109 |
7944f6c6d8b189575f95e4cb237910bad9f90218 | 38,414 | py | Python | superai/data_program/protocol/transport.py | mysuperai/superai-sdk | 796c411c6ab69209600bf727e8fd08c20f4d67b1 | [
"Apache-2.0"
] | 1 | 2020-12-03T18:18:16.000Z | 2020-12-03T18:18:16.000Z | superai/data_program/protocol/transport.py | mysuperai/superai-sdk | 796c411c6ab69209600bf727e8fd08c20f4d67b1 | [
"Apache-2.0"
] | 13 | 2021-02-22T18:27:58.000Z | 2022-02-10T08:14:10.000Z | superai/data_program/protocol/transport.py | mysuperai/superai-sdk | 796c411c6ab69209600bf727e8fd08c20f4d67b1 | [
"Apache-2.0"
] | 1 | 2021-04-27T12:38:47.000Z | 2021-04-27T12:38:47.000Z | """ A transport layer for communicating with the Agent """
from __future__ import absolute_import, division, print_function, unicode_literals
import concurrent
import enum
import functools
import io
import json
import os
import signal
import sys
from logging import FATAL, WARN
from threading import local, Lock, Thread
import jsonpickle
import sentry_sdk
from futures_then import ThenableFuture as Future
from jsonschema.exceptions import ValidationError
from superai.data_program.Exceptions import *
from superai.data_program.experimental import forget_memo
from superai.log import logger
from superai.utils import sentry_helper
sentry_helper.init()
logger = logger.get_logger(__name__)
class OperationStatus(str, enum.Enum):
SUCCEEDED = "SUCCEEDED"
FAILED = "FAILED"
NO_SUITABLE_COMBINER = "NO_SUITABLE_COMBINER"
TASK_EXPIRED = "TASK_EXPIRED"
JOB_EXPIRED = "JOB_EXPIRED"
# A message to the agent
# meta_info: contains the information about the attempted operation and other details on how to interpret the body.
# body: contains the actual output of the operation (e.g. job output)
class message:
class metaInfo:
def __init__(self, version, operation_status):
self.version = version
self.operation_status = operation_status.value
def __init__(self, body=None, operation_status=OperationStatus.SUCCEEDED, version=0.1):
self.meta_info = self.metaInfo(version, operation_status)
self.body = body
@property
def to_json(self):
return jsonpickle.encode(self, unpicklable=False)
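# Illustrative frame sent to the agent (a sketch; the exact key order depends
# on the jsonpickle version, and the body shown here is hypothetical):
#
#     >>> message({"type": "STORE_REPORT", "id": 7}).to_json
#     '{"meta_info": {"version": 0.1, "operation_status": "SUCCEEDED"}, "body": {"type": "STORE_REPORT", "id": 7}}'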
class future(Future):
def __init__(self):
Future.__init__(self)
self._cookie = None
def set_cookie(self, cookie):
self._cookie = cookie
def cookie(self):
return self._cookie
# Dictionary to store all local workflows
_workflow_functions = {}
_workflow_functions_lock = Lock() # Lock to protect _workflow_callbacks logic
# Dictionary to store all futures
_task_futures = {}
_task_futures_lock = Lock() # Lock to protect _task_dictionary logic
# Job input parameter
_job_input = {}
_job_input_data = {}
_job_input_lock = Lock()
# Job snapshot state
_snapshot = {}
_snapshot_data = {}
_snapshot_lock = Lock()
# Child job result
_child_job = {}
_child_job_lock = Lock()
_terminate_flag = {}
_terminate_flag_lock = Lock()
_pipe_lock = Lock()
# in-out fifo pipes for communication with Agent
_in_pipe = (
io.open("/tmp/canotic.in." + os.environ["CANOTIC_AGENT"], "r", encoding="utf-8")
if "CANOTIC_AGENT" in os.environ
else None
)
_out_pipe = (
io.open("/tmp/canotic.out." + os.environ["CANOTIC_AGENT"], "w", encoding="utf-8")
if "CANOTIC_AGENT" in os.environ
else None
)
_context = local()
if "CANOTIC_AGENT" in os.environ and "CANOTIC_SERVE" not in os.environ:
_context.id = int(os.environ["CANOTIC_AGENT"])
_context.uuid = os.environ["CANOTIC_AGENT"]
_context.sequence = 0
_context.bill = None
_context.app_id = None
_context.project_id = None
_context.is_child = False
_context.metadata = None
_context.job_type = None
with _task_futures_lock:
_task_futures[_context.id] = {}
with _job_input_lock:
_job_input[_context.id] = future()
_job_input_data[_context.id] = None
with _snapshot_lock:
_snapshot[_context.id] = None
_snapshot_data[_context.id] = None
with _child_job_lock:
_child_job[_context.id] = None
with _terminate_flag_lock:
_terminate_flag[_context.id] = False
def terminate_guard(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
with _terminate_flag_lock:
if _terminate_flag[_context.id]:
raise ValueError("Workflow instance {} terminated".format(_context.id))
return function(*args, **kwargs)
return wrapper
def writeln_to_pipe_and_flush(text_data):
try:
print("Called by " + sys._getframe(1).f_code.co_name + " to pipe :" + text_data)
_out_pipe.write(text_data)
except TypeError as e:
print(e)
_out_pipe.write(unicode(text_data))
sentry_sdk.capture_exception(e)
except Exception as e:
print(e)
logger.exception("Exception writing text_data to pipe")
sentry_sdk.capture_exception(e)
try:
try:
_out_pipe.write("\n")
except Exception as e:
print(e)
logger.exception("Exception writing \\n to pipe")
sentry_sdk.capture_exception(e)
finally:
_out_pipe.flush()
except BrokenPipeError as bp:
sentry_sdk.capture_exception(bp)
logger.exception(
f"[BrokenPipeError] {str(bp)} \nfilename {bp.filename if bp.filename else None} \nfilename2 {bp.filename2 if bp.filename2 else None} \nstrerror {bp.strerror if bp.strerror else None}"
)
class child_result:
def __init__(self, result):
self._id = result["id"] if "id" in result else None
self._status = result["status"] if "status" in result else None
self._response = result["response"] if "response" in result else None
self._data_ref = result["dataRef"] if "dataRef" in result else None
self._timestamp = result["timestamp"] if "timestamp" in result else None
self._data = None
@terminate_guard
def id(self):
return self._id
@terminate_guard
def status(self):
return self._status
@terminate_guard
def response(self):
return self._response
@terminate_guard
def timestamp(self):
return self._timestamp
@terminate_guard
def data(self):
if self._data is not None:
return self._data.result()
if self._data_ref is None:
return None
global _child_job
self._data = _child_job[_context.id] = future()
params = {
"type": "LOAD_CHILD_DATA",
"id": _context.id,
"sequence": -1,
"data": self._data_ref,
}
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
return self._data.result()
@terminate_guard
def __getitem__(self, key):
if "timestamp" == key:
return self._timestamp
raise ValueError("Expected 'timestamp' key, got " + key)
class child_job_future(future):
def __init__(self):
future.__init__(self)
def set_result(self, response):
super(child_job_future, self).set_result(child_result(response))
class task_result:
def __init__(self, result):
self._result = result
@terminate_guard
def id(self):
return self._result["id"] if "id" in self._result else None
@terminate_guard
def status(self):
return self._result["status"] if "status" in self._result else None
@terminate_guard
def hero(self):
return self._result["workerId"] if "workerId" in self._result else None
@terminate_guard
def mturk_id(self):
return self._result.get("hero", {}).get("mturkId")
@terminate_guard
def values(self):
return self._result["values"] if "values" in self._result else None
@terminate_guard
def sequence(self):
return self._result["sequence"] if "sequence" in self._result else None
def task(self):
return self.sequence()
@terminate_guard
def timestamp(self):
return self._result["timestamp"] if "timestamp" in self._result else None
@terminate_guard
def __getitem__(self, key):
return self._result[key]
@terminate_guard
def get(self, key):
return self._result.get(key)
@terminate_guard
def response(self):
return self._result
class task_future(future):
def __init__(self):
future.__init__(self)
def set_result(self, response):
super(task_future, self).set_result(task_result(response))
#
# qualifications = [{
# "name":"abc",
# "value": 12,
# "operator": 'EQUALS_TO'
# },{
# "name":"def",
# "value": 3,
# "operator": 'GREATER_THAN'
# }]
#
# 'EQUALS_TO'
# 'EXISTS'
# 'GREATER_THAN'
# 'GREATER_THAN_OR_EQUALS_TO'
# 'LESS_THAN'
# 'LESS_THAN_OR_EQUALS_TO'
# 'NOT_EXISTS'
#
@terminate_guard
def schedule_task(
name,
humans,
price,
input,
ai_input,
output,
title,
description,
paragraphs,
completed_tasks,
total_tasks,
includedIds,
excludedIds,
explicitId,
timeToResolveSec,
timeToUpdateSec,
timeToExpireSec,
qualifications,
show_reject,
groups,
excluded_groups,
amount,
schema_version,
):
"""Schedule task for execution by inserting it into the future table"""
seq = _context.sequence
_context.sequence += 1
constraints = {}
if humans is not None:
constraints["emails"] = humans
if price is not None:
constraints["priceTag"] = price
if amount is not None:
constraints["amount"] = amount
if excludedIds is not None:
constraints["excluded"] = excludedIds
if includedIds is not None:
constraints["included"] = includedIds
if excluded_groups is not None:
constraints["excludedGroups"] = excluded_groups
if groups is not None:
constraints["groups"] = groups
if explicitId is not None:
constraints["id"] = explicitId
if timeToResolveSec is not None:
constraints["timeToResolve"] = 1000 * timeToResolveSec
if timeToUpdateSec is not None:
constraints["timeToUpdate"] = 1000 * timeToUpdateSec
if timeToExpireSec is not None:
constraints["timeToExpire"] = 1000 * timeToExpireSec
if qualifications is not None:
constraints["metrics"] = qualifications
if (amount is None) and (price is None):
constraints["priceTag"] = "EASY"
params = {
"type": "EVALUATE_TASK",
"id": _context.id,
"sequence": seq,
"name": name,
# 'platform': 'CANOTIC',
"constraints": constraints,
"payload": {},
}
params["payload"]["schemaVersion"] = schema_version
params["payload"]["input"] = input
params["payload"]["output"] = output
if ai_input is not None:
params["payload"]["modelInput"] = ai_input
params["payload"]["taskInfo"] = {}
params["payload"]["actions"] = {}
if completed_tasks is not None:
params["payload"]["taskInfo"]["completedTasks"] = completed_tasks
if total_tasks is not None:
params["payload"]["taskInfo"]["totalTasks"] = total_tasks
if title is not None:
params["payload"]["taskInfo"]["title"] = title
if description is not None:
params["payload"]["taskInfo"]["description"] = description
if paragraphs is not None:
params["payload"]["taskInfo"]["paragraphs"] = paragraphs
params["payload"]["actions"]["showReject"] = show_reject
f = None
with _task_futures_lock:
if seq not in _task_futures[_context.id]:
_task_futures[_context.id][seq] = task_future()
f = _task_futures[_context.id][seq]
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
return f
def schedule_workflow(
name,
param,
constraints,
data_folder,
tag,
timeToExpireSec,
suffix,
app_metrics,
app_params,
metadata,
):
"""Schedule task for execution by inserting it into the future table"""
seq = _context.sequence
_context.sequence += 1
params = {
"type": "EXECUTE_JOB",
"id": _context.id,
"sequence": seq,
"workflow": name,
}
if suffix is not None:
params["suffix"] = suffix
if timeToExpireSec is not None:
params["timeToExpire"] = 1000 * timeToExpireSec
if param is not None:
params["subject"] = param
if data_folder is not None:
params["data"] = data_folder
if constraints is not None:
params["constraints"] = constraints
if tag is not None:
params["tag"] = tag
if app_params is not None or app_metrics is not None:
params["context"] = {}
if app_metrics is not None:
params["context"]["app_metrics"] = app_metrics
if app_params is not None:
params["context"]["app_params"] = app_params
if metadata is not None:
params["metadata"] = metadata
f = None
with _task_futures_lock:
if seq not in _task_futures[_context.id]:
_task_futures[_context.id][seq] = child_job_future()
f = _task_futures[_context.id][seq]
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
return f
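# Illustrative child-job call (a sketch; the workflow name and payload are
# hypothetical). Arguments are positional: name, param, constraints,
# data_folder, tag, timeToExpireSec, suffix, app_metrics, app_params, metadata.
#
#     child_future = schedule_workflow(
#         "my_dp.child", {"url": "..."}, None, None, None,
#         None, None, None, None, None,
#     )
#     child = child_future.result()      # a child_result instance
#     child.status(), child.response()   # child job status and output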
@terminate_guard
def resolve_job(response, data_folder, bill):
"""Resolve a job and persist the response"""
seq = _context.sequence
_context.sequence += 1
params = {
"type": "RESOLVE_JOB",
"id": _context.id,
"sequence": seq,
"annotation": response,
}
if data_folder is not None:
params["data"] = data_folder
if bill is None:
bill = _context.bill
if bill is not None:
params["bill"] = bill
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
f = None
with _task_futures_lock:
if seq not in _task_futures[_context.id]:
_task_futures[_context.id][seq] = future()
f = _task_futures[_context.id][seq]
f.result()
@terminate_guard
def suspend_job_for_no_combiner():
"""Suspend a job"""
seq = _context.sequence
_context.sequence += 1
params = {"type": "SUSPEND_JOB", "id": _context.id, "sequence": seq}
message_for_agent = message(params, OperationStatus.NO_SUITABLE_COMBINER)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
f = None
with _task_futures_lock:
if seq not in _task_futures[_context.id]:
_task_futures[_context.id][seq] = future()
f = _task_futures[_context.id][seq]
f.result()
@terminate_guard
def fail_job(error):
"""Fail a job"""
print(error)
seq = _context.sequence
_context.sequence += 1
params = {
"type": "FAIL_JOB",
"id": _context.id,
"sequence": seq,
"error": error,
}
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
f = None
with _task_futures_lock:
if seq not in _task_futures[_context.id]:
_task_futures[_context.id][seq] = future()
f = _task_futures[_context.id][seq]
f.result()
@terminate_guard
def internal_error(error):
"""Fail a job"""
print(error)
seq = _context.sequence
_context.sequence += 1
params = {
"type": "INTERNAL_ERROR",
"id": _context.id,
"sequence": seq,
"error": error,
}
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
f = None
with _task_futures_lock:
if seq not in _task_futures[_context.id]:
_task_futures[_context.id][seq] = future()
f = _task_futures[_context.id][seq]
f.result()
@terminate_guard
def expire_job(error):
"""Expire a job"""
print(error)
seq = _context.sequence
_context.sequence += 1
params = {
"type": "EXPIRE_JOB",
"id": _context.id,
"sequence": seq,
"error": error,
}
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
f = None
with _task_futures_lock:
if seq not in _task_futures[_context.id]:
_task_futures[_context.id][seq] = future()
f = _task_futures[_context.id][seq]
f.result()
def _worklow_thread(id, suffix, response):
_context.id = id
_context.uuid = response["uuid"]
_context.app_id = response.get("appId")
_context.root_app_uuid = response.get("rootAppUuid")
_context.project_id = response["projectId"]
_context.is_child = response["child"]
_context.sequence = 0
_context.bill = None
_context.metadata = response.get("metadata")
_context.job_type = response.get("jobType") if response.get("jobType") else None
_context.priority = response.get("priority")
with _task_futures_lock:
_task_futures[_context.id] = {}
with _job_input_lock:
_job_input[_context.id] = future()
_job_input[_context.id].set_result(response)
_job_input_data[_context.id] = None
with _snapshot_lock:
_snapshot[_context.id] = None
_snapshot_data[_context.id] = None
with _child_job_lock:
_child_job[_context.id] = None
with _terminate_flag_lock:
_terminate_flag[_context.id] = False
try:
subject = response["subject"] if "subject" in response else None
context = response["context"] if "context" in response else None
function = None
with _workflow_functions_lock:
if suffix not in _workflow_functions:
raise ValueError("Unexpected suffix: " + suffix)
function = _workflow_functions[suffix]
result = function(subject, context)
resolve_job(*result) if type(result) == tuple else resolve_job(result, None, None)
except ValidationError as error:
internal_error("\nSchema validation error: {0}".format(error))
with sentry_sdk.push_scope() as scope:
scope.set_tag("job_id", id)
scope.set_tag("job_uuid", _context.uuid)
scope.set_tag("app_id", response.get("appId"))
scope.set_tag("is_child", _context.is_child)
scope.set_tag(
"job_type",
_context.job_type if _context.job_type else response.get("jobType"),
)
scope.set_level(FATAL)
sentry_sdk.capture_exception(error)
logger.exception("ValidationError encountered in _workflow_thread")
except concurrent.futures._base.CancelledError as error:
logger.info(
"Job #{} of type {} cancelled or expired for app_id {}. error {}".format(
_context.uuid, _context.job_type, _context.app_id, str(error)
)
)
except CancelledError as error:
logger.info(
"Job #{} of type {} cancelled or expired for app_id {}. error {}".format(
_context.uuid, _context.job_type, _context.app_id, str(error)
)
)
except ChildJobFailed as error:
fail_job("FAIL_JOB: Job {} child failed".format(_context.uuid))
with sentry_sdk.push_scope() as scope:
scope.set_tag("job_id", id)
scope.set_tag("job_uuid", _context.uuid)
scope.set_tag("app_id", response.get("appId"))
scope.set_tag("is_child", _context.is_child)
scope.set_tag(
"job_type",
_context.job_type if _context.job_type else response.get("jobType"),
)
scope.set_level(FATAL)
sentry_sdk.capture_exception(error)
logger.exception(
"Job #{} of type {} throw child job failed {}. "
"error {}".format(_context.uuid, _context.job_type, _context.app_id, str(error))
)
except QualifierTaskExpired as error:
fail_job("FAIL_JOB :: {}: {}".format(type(error), error))
logger.info(
"Qualifier task expired for Job #{} of type {}. "
"error {}".format(_context.uuid, _context.job_type, str(error))
)
except TaskExpiredMaxRetries as error:
logger.info(
"Task expired after maximum number of retries for Job #{} of type {}. "
"error {}".format(_context.uuid, _context.job_type, str(error))
)
if (_context.job_type and _context.job_type == "COLLABORATOR") or response.get("jobType") == "COLLABORATOR":
expire_job("\nEXPIRE_JOB :: {}: {}".format(type(error), error))
scope_level = WARN
else:
internal_error("\nINTERNAL_ERROR :: {}: {}".format(type(error), error))
scope_level = FATAL
with sentry_sdk.push_scope() as scope:
scope.set_tag("job_id", id)
scope.set_tag("job_uuid", _context.uuid)
scope.set_tag("app_id", _context.app_id)
scope.set_tag("is_child", _context.is_child)
scope.set_tag(
"job_type",
_context.job_type if _context.job_type else response.get("jobType"),
)
scope.set_level(scope_level)
sentry_sdk.capture_exception(error)
except ChildJobInternalError as error:
internal_error("INTERNAL_ERROR: Job {} child threw internal error".format(_context.uuid))
with sentry_sdk.push_scope() as scope:
scope.set_tag("job_id", id)
scope.set_tag("job_uuid", _context.uuid)
scope.set_tag("app_id", response.get("appId"))
scope.set_tag("is_child", _context.is_child)
scope.set_tag(
"job_type",
_context.job_type if _context.job_type else response.get("jobType"),
)
scope.set_level(FATAL)
sentry_sdk.capture_exception(error)
logger.exception(
"Job #{} of type {} throw child job internal error {}. "
"error {}".format(_context.uuid, _context.job_type, _context.app_id, str(error))
)
except EmptyPerformanceError as error:
internal_error("\n INTERNAL_ERROR :: {}: {}".format(type(error), error))
with sentry_sdk.push_scope() as scope:
scope.set_tag("job_id", id)
scope.set_tag("job_uuid", _context.uuid)
scope.set_tag("app_id", response.get("appId"))
scope.set_tag("is_child", _context.is_child)
scope.set_tag(
"job_type",
_context.job_type if _context.job_type else response.get("jobType"),
)
scope.set_level(FATAL)
sentry_sdk.capture_exception(error)
logger.error("Performance not found exception")
except UnsatisfiedMetricsError as error:
suspend_job_for_no_combiner()
with sentry_sdk.push_scope() as scope:
scope.set_tag("job_id", id)
scope.set_tag("job_uuid", _context.uuid)
scope.set_tag("app_id", _context.app_id)
scope.set_tag("is_child", _context.is_child)
scope.set_tag(
"job_type",
_context.job_type if _context.job_type else response.get("jobType"),
)
scope.set_level(WARN)
sentry_sdk.capture_exception(error)
logger.error("Unsatisfied metrics for job: #{}".format(_context.id))
except Exception as ex:
internal_error("\nINTERNAL_ERROR :: {}: {}".format(type(ex), ex))
with sentry_sdk.push_scope() as scope:
scope.set_tag("job_id", id)
scope.set_tag("job_uuid", _context.uuid)
scope.set_tag("app_id", _context.app_id)
scope.set_tag("is_child", _context.is_child)
scope.set_tag(
"job_type",
_context.job_type if _context.job_type else response.get("jobType"),
)
scope.set_level(FATAL)
sentry_sdk.capture_exception(ex)
logger.exception("Exception encountered in _workflow_thread")
finally:
with _task_futures_lock:
del _task_futures[_context.id]
with _job_input_lock:
del _job_input[_context.id]
del _job_input_data[_context.id]
with _snapshot_lock:
del _snapshot[_context.id]
del _snapshot_data[_context.id]
with _child_job_lock:
del _child_job[_context.id]
with _terminate_flag_lock:
del _terminate_flag[_context.id]
del _context.id
del _context.sequence
del _context.bill
def _task_pump():
"""This method waits for incoming response and resolves the corresponding task future"""
while True:
line = _in_pipe.readline().rstrip("\n")
response = json.loads(line)
if "type" not in response:
raise ValueError("`type` is missing in response")
if "id" not in response:
raise ValueError("`id` is missing in response")
id = response["id"]
if "ERROR" == response["type"]:
if "error" not in response:
raise ValueError("Response `type` `ERROR` expects `error` property")
sys.stderr.write("Traceback (most recent call last):\n")
sys.stderr.write(response["error"])
sys.stderr.write("\n")
sys.stderr.flush()
os.kill(os.getpid(), signal.SIGTERM)
elif "JOB_PARAMS" == response["type"]:
if "sequence" in response:
raise ValueError("JOB_PARAMS come out of bound and don't expect to contain 'sequence'")
job_input = None
with _job_input_lock:
job_input = _job_input[id]
job_input.set_result(response)
elif "JOB_DATA" == response["type"]:
if "sequence" in response:
raise ValueError("JOB_DATA come out of bound and don't expect to contain 'sequence'")
job_input_data = None
with _job_input_lock:
job_input_data = _job_input_data[id]
job_input_data.set_result(response["data"] if "data" in response else None)
elif "SNAPSHOT" == response["type"]:
snapshot = None
with _snapshot_lock:
snapshot = _snapshot[id]
snapshot.set_result(response)
if "sequence" in response:
_context.sequence = response["sequence"]
elif "SNAPSHOT_DATA" == response["type"]:
if "sequence" in response:
raise ValueError("SNAPSHOT_DATA come out of bound and don't expect to contain 'sequence'")
snapshot_data = None
with _snapshot_lock:
snapshot_data = _snapshot_data[id]
snapshot.set_result(response["data"] if "data" in response else None)
elif "CHILD_JOB_DATA" == response["type"]:
if "sequence" in response:
raise ValueError("CHILD_JOB_DATA come out of bound and don't expect to contain 'sequence'")
child_job = None
with _child_job_lock:
child_job = _child_job[id]
child_job.set_result(response["data"] if "data" in response else None)
elif "EXECUTE" == response["type"]:
if "suffix" not in response:
raise ValueError("Response `type` `EXECUTE` expects `suffix` property")
suffix = response["suffix"]
thread = Thread(
target=_worklow_thread,
name="{0}-{1}".format(suffix, id),
args=(id, suffix, response),
)
thread.daemon = True
thread.start()
elif "CANCEL" == response["type"] or "SUSPEND" == response["type"]:
with _terminate_flag_lock:
_terminate_flag[id] = True
with _task_futures_lock:
if id in _task_futures:
for seq in _task_futures[id]:
_task_futures[id][seq].cancel()
with _job_input_lock:
if id in _job_input:
_job_input[id].cancel()
if _job_input_data[id] is not None:
_job_input_data[id].cancel()
with _snapshot_lock:
if id in _snapshot:
if _snapshot[id] is not None:
_snapshot[id].cancel()
if _snapshot_data[id] is not None:
_snapshot_data[id].cancel()
with _child_job_lock:
if id in _child_job:
if _child_job[id] is not None:
_child_job[id].cancel()
if response["type"] == "SUSPEND":
job_uuid = response["uuid"]
app_id = response["appId"]
forget_memo(None, prefix=f"{job_uuid}/")
else:
if "sequence" not in response:
raise ValueError("'sequence' expected in inbound message")
seq = response["sequence"]
f = None
with _task_futures_lock:
if id in _task_futures:
if seq not in _task_futures[id]:
if "CHILD_RESPONSE" == response["type"]:
logger.warn("CHILD_RESPONSE:missing_child_job_future id {} seq {}".format(id, seq))
_task_futures[id][seq] = child_job_future()
else:
_task_futures[id][seq] = future()
f = _task_futures[id][seq]
if f is None:
sys.stderr.write("Unexpected id/sequence (late response?): {0}/{1}\n".format(id, seq))
sys.stderr.flush()
else:
f.set_result(response)
if "CANOTIC_AGENT" in os.environ:
_task_thread = Thread(target=_task_pump, name="pump")
_task_thread.daemon = "CANOTIC_SERVE" not in os.environ
_task_thread.start()
@terminate_guard
def get_job_data():
global _job_input_data
global _job_input_lock
job_input_data = None
with _job_input_lock:
if _job_input_data[_context.id] is not None:
job_input_data = _job_input_data[_context.id]
if job_input_data is not None:
return job_input_data.result()
with _job_input_lock:
_job_input_data[_context.id] = job_input_data = future()
params = {"type": "LOAD_JOB_DATA", "id": _context.id, "sequence": -1}
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
return job_input_data.result()
@terminate_guard
def save_hero_qualification(hero, qualification, value):
"""Perist hero metric"""
params = {
"type": "STORE_METRIC",
"id": _context.id,
"sequence": -1,
"hero": hero,
"metric": qualification,
}
if value is not None:
params["value"] = value
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
@terminate_guard
def remove_hero_qualification(hero, qualification):
"""Perist hero metric"""
params = {
"type": "REMOVE_METRIC",
"id": _context.id,
"hero": hero,
"metric": qualification,
}
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
@terminate_guard
def save_snapshot(snapshot, data_folder):
seq = _context.sequence
params = {
"type": "SNAPSHOT",
"id": _context.id,
"sequence": seq,
"snapshot": snapshot,
}
if data_folder is not None:
params["data"] = data_folder
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
@terminate_guard
def load_snapshot():
global _snapshot
global _snapshot_lock
snapshot = None
with _snapshot_lock:
if _snapshot[_context.id] is not None:
snapshot = _snapshot[_context.id]
if snapshot is not None:
return snapshot.result()
with _snapshot_lock:
_snapshot[_context.id] = snapshot = future()
params = {
"type": "RESTORE_SNAPSHOT",
"id": _context.id,
"sequence": _context.sequence,
}
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
return snapshot.result()
@terminate_guard
def load_snapshot_data():
global _snapshot_data
global _snapshot_lock
snapshot_data = None
with _snapshot_lock:
if _snapshot_data[_context.id] is not None:
snapshot_data = _snapshot_data[_context.id]
if snapshot_data is not None:
return snapshot_data.result()
with _snapshot_lock:
_snapshot_data[_context.id] = snapshot_data = future()
params = {"type": "LOAD_SNAPSHOT_DATA", "id": _context.id, "sequence": -1}
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
return snapshot_data.result()
@terminate_guard
def send_report(status):
params = {
"type": "STORE_REPORT",
"id": _context.id,
"sequence": -1,
"status": status,
}
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
def run_model_predict(predict_func, port=8080, context=None):
_in_pipe = io.open("/tmp/canotic.in." + os.environ["CANOTIC_PREDICT"], "r", encoding="utf-8")
_out_pipe = io.open("/tmp/canotic.out." + os.environ["CANOTIC_PREDICT"], "w", encoding="utf-8")
line = _in_pipe.readline()
while len(line) != 0:
line = line.rstrip("\n")
request = json.loads(line)
if "type" not in request:
raise ValueError("Message \`type\` is missing in request")
if "PREDICT" != request["type"]:
raise ValueError("Only message \`type\` 'PREDICT' is expected in serve_predict mode")
if "sequence" not in request:
raise ValueError("'sequence' expected in inbound message")
sequence = request["sequence"]
response = predict_func(request["input"], context)
params = {"type": "PREDICTION", "sequence": sequence, "response": response}
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
line = _in_pipe.readline()
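# Illustrative predict callable (a sketch; the model and response fields are
# hypothetical). The callable receives the request's "input" payload and the
# optional context and must return a JSON-serializable response:
#
#     def my_predict(input, context=None):
#         return {"label": "cat", "score": 0.9}
#
#     # run_model_predict(my_predict)  # requires the CANOTIC_PREDICT pipes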
def subscribe_workflow(function, prefix, suffix, schema=None):
"""Subscribe workflow"""
if suffix is None:
raise ValueError("Suffix is missing")
with _workflow_functions_lock:
_workflow_functions[suffix] = function
params = {"type": "SUBSCRIBE", "suffix": suffix}
if schema is not None:
params["schema"] = schema
if prefix is not None:
params["workflow"] = prefix
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
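# Illustrative registration (a sketch; the prefix, suffix and handler are
# hypothetical). The subscribed callable is invoked as function(subject, context)
# and may return either a plain response or a (response, data_folder, bill) tuple.
#
#     def my_workflow(subject, context):
#         return {"status": "ok"}
#
#     # subscribe_workflow(my_workflow, prefix="my_dp", suffix="main")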
@terminate_guard
def attach_bill(amount):
_context.bill = amount
@terminate_guard
def send_reward(task, amount, reason):
"""Give hero a reward"""
if task is None:
raise ValueError("Reward task is missing")
if amount is None:
raise ValueError("Reward amount is missing")
params = {
"type": "REWARD_HERO",
"id": _context.id,
"sequence": task,
"amount": amount,
"reason": reason,
}
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
@terminate_guard
def decline_result(task, reason):
if reason is None:
raise ValueError("Decline reason is missing")
params = {"type": "DECLINE", "id": _context.id, "sequence": task, "reason": reason}
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
@terminate_guard
def get_context_id():
return _context.uuid
@terminate_guard
def get_job_id():
return _context.id
@terminate_guard
def get_root_app_uuid():
if hasattr(_context, "root_app_uuid"):
return _context.root_app_uuid
else:
return None
@terminate_guard
def get_job_priority():
if hasattr(_context, "priority"):
return _context.priority
else:
return None
@terminate_guard
def get_context_app_id():
return _context.app_id
@terminate_guard
def get_context_project_id():
return _context.project_id
@terminate_guard
def get_context_is_child():
return _context.is_child
@terminate_guard
def get_context_metadata():
return _context.metadata
@terminate_guard
def get_context_job_type():
return _context.job_type
@terminate_guard
def schedule_mtask(
name,
input,
output,
title,
description,
paragraphs,
show_reject,
amount,
sandbox,
timeToResolveSec=None,
timeToExpireSec=None,
qualifications=None,
):
"""Schedule task for execution by inserting it into the future table"""
seq = _context.sequence
_context.sequence += 1
constraints = {}
if amount is not None:
constraints["amount"] = amount
if sandbox is not None:
constraints["sandBox"] = sandbox
if timeToResolveSec is not None:
constraints["timeToResolve"] = 1000 * timeToResolveSec
if timeToExpireSec is not None:
constraints["timeToExpire"] = 1000 * timeToExpireSec
if qualifications is not None:
constraints["metrics"] = qualifications
params = {
"type": "EVALUATE_TASK",
"id": _context.id,
"sequence": seq,
"name": name,
"platform": "MTURK",
"constraints": constraints,
"payload": {},
}
params["payload"]["input"] = input
params["payload"]["output"] = output
params["payload"]["taskInfo"] = {}
params["payload"]["actions"] = {}
if title is not None:
params["payload"]["taskInfo"]["title"] = title
if description is not None:
params["payload"]["taskInfo"]["description"] = description
if paragraphs is not None:
params["payload"]["taskInfo"]["paragraphs"] = paragraphs
params["payload"]["actions"]["showReject"] = show_reject
f = None
with _task_futures_lock:
if seq not in _task_futures[_context.id]:
_task_futures[_context.id][seq] = task_future()
f = _task_futures[_context.id][seq]
message_for_agent = message(params)
with _pipe_lock:
writeln_to_pipe_and_flush(message_for_agent.to_json)
return f
| 28.245588 | 195 | 0.6242 |
7944f86eef9f54bcb82b822d92fe5b30b5bf84ba | 164 | py | Python | src/week_7/data/io.py | Rutafar/text-analytics-lecture | 3cdf5cf5b0d2ca85343c259beade0054f4be6e3d | [
"MIT"
] | 2 | 2018-03-05T20:45:11.000Z | 2019-03-17T02:59:57.000Z | src/week_7/data/io.py | Rutafar/text-analytics-lecture | 3cdf5cf5b0d2ca85343c259beade0054f4be6e3d | [
"MIT"
] | null | null | null | src/week_7/data/io.py | Rutafar/text-analytics-lecture | 3cdf5cf5b0d2ca85343c259beade0054f4be6e3d | [
"MIT"
] | 8 | 2018-03-02T14:12:35.000Z | 2018-04-21T10:31:25.000Z | from utils.utils import data_from_pickle
def load_tweets():
"""Load cleaned tweets into session."""
return data_from_pickle('02_cleaned/trump_tweets.pkl') | 27.333333 | 58 | 0.762195 |
7944f9854124b32b253e7e94def6ab34fdc83b26 | 3,089 | py | Python | model/CNN6_FC2.py | dmholtz/cnn-cifar10-pytorch | 00246ab1f2694332328987fdff2e14bb106e2241 | [
"MIT"
] | 4 | 2020-04-13T06:28:10.000Z | 2021-12-07T07:52:06.000Z | model/CNN6_FC2.py | dmholtz/cnn-cifar10-pytorch | 00246ab1f2694332328987fdff2e14bb106e2241 | [
"MIT"
] | null | null | null | model/CNN6_FC2.py | dmholtz/cnn-cifar10-pytorch | 00246ab1f2694332328987fdff2e14bb106e2241 | [
"MIT"
] | null | null | null | """
Convolutional neural network with six convolutional layers followed by two
fully-connected layers
@author: dmholtz
"""
import torch.nn as nn
import torch.nn.functional as F
# define the CNN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# 1. convolutional layer
# sees 32x32x3 image tensor, i.e 32x32 RGB pixel image
# outputs 32 filtered images, kernel-size is 3
self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
self.conv1_bn = nn.BatchNorm2d(32)
# 2. convolutional layer
        # sees 32x32x32 tensor (the 2x2 MaxPooling layer is applied afterwards)
# outputs 32 filtered images, kernel-size is 3
self.conv2 = nn.Conv2d(32, 32, 3, padding=1)
self.conv2_bn = nn.BatchNorm2d(32)
# 3. convolutional layer
        # sees 16x16x32 tensor (2x2 MaxPooling layer beforehand)
# outputs 64 filtered images, kernel-size is 3
self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
self.conv3_bn = nn.BatchNorm2d(64)
self.conv4 = nn.Conv2d(64, 64, 3, padding=1)
self.conv4_bn = nn.BatchNorm2d(64)
self.conv5 = nn.Conv2d(64, 128, 3, padding=1)
self.conv5_bn = nn.BatchNorm2d(128)
        self.conv6 = nn.Conv2d(128, 128, 3, padding=1)
self.conv6_bn = nn.BatchNorm2d(128)
# Definition of the MaxPooling layer
self.pool = nn.MaxPool2d(2, 2)
# 1. fully-connected layer
        # Input is a flattened 4*4*128 dimensional vector
        # Output is a 128 dimensional vector
self.fc1 = nn.Linear(128 * 4 * 4, 128)
self.fc1_bn = nn.BatchNorm1d(128)
self.fc2 = nn.Linear(128, 10)
        # definition of dropout layers (dropout probabilities 20% to 50%)
self.dropout20 = nn.Dropout(0.2)
self.dropout30 = nn.Dropout(0.3)
self.dropout40 = nn.Dropout(0.4)
self.dropout50 = nn.Dropout(0.5)
def forward(self, x):
        # Pass data through a sequence of 3 convolutional blocks (two conv layers each)
# Firstly, filters are applied -> increases the depth
# Secondly, Relu activation function is applied
# Finally, MaxPooling layer decreases width and height
x = self.conv1_bn(F.relu(self.conv1(x)))
x = self.pool(self.conv2_bn(F.relu(self.conv2(x))))
x = self.dropout20(x)
x = self.conv3_bn(F.relu(self.conv3(x)))
x = self.pool(self.conv4_bn(F.relu(self.conv4(x))))
x = self.dropout30(x)
x = self.conv5_bn(F.relu(self.conv5(x)))
x = self.pool(self.conv6_bn(F.relu(self.conv6(x))))
x = self.dropout40(x)
        # flatten output of the third convolutional block into a vector
# this vector is passed through the fully-connected nn
x = x.view(-1, 128 * 4 * 4)
        # add 1st hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout50(x)
# add 2nd hidden layer, without relu activation function
x = self.fc2(x)
return x
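# Minimal smoke test (illustrative; not part of the original training pipeline).
# A batch of four 32x32 RGB images should yield logits of shape (4, 10): the
# three pooling stages halve 32x32 down to 4x4 with 128 feature maps, matching
# the 128 * 4 * 4 input expected by fc1.
if __name__ == "__main__":
    import torch
    net = Net()
    net.eval()
    with torch.no_grad():
        logits = net(torch.randn(4, 3, 32, 32))
    print(logits.shape)  # expected: torch.Size([4, 10])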
| 38.135802 | 73 | 0.613143 |
7944f9a70152e5ecb54755b5e7a6f7bf8373f60e | 27,507 | py | Python | flopy/modflow/mfswi2.py | geowat/flopy | b6b110a8807e18dca9b0b7491db0a72b36709098 | [
"CC0-1.0",
"BSD-3-Clause"
] | 1 | 2021-03-17T09:15:54.000Z | 2021-03-17T09:15:54.000Z | flopy/modflow/mfswi2.py | geowat/flopy | b6b110a8807e18dca9b0b7491db0a72b36709098 | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | flopy/modflow/mfswi2.py | geowat/flopy | b6b110a8807e18dca9b0b7491db0a72b36709098 | [
"CC0-1.0",
"BSD-3-Clause"
] | 2 | 2020-01-03T17:14:39.000Z | 2020-03-04T14:21:27.000Z | """
mfswi2 module. Contains the ModflowSwi2 class. Note that the user can access
the ModflowSwi2 class as `flopy.modflow.ModflowSwi2`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/swi2_seawater_intrusion_pack.htm>`_.
"""
import sys
import numpy as np
from ..pakbase import Package
from ..utils import Util2d, Util3d
class ModflowSwi2(Package):
"""
MODFLOW SWI2 Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
nsrf : int
number of active surfaces (interfaces). This equals the number of zones
minus one. (default is 1).
istrat : int
flag indicating the density distribution. (default is 1).
iswizt : int
unit number for zeta output. (default is None).
ipakcb : int
A flag that is used to determine if cell-by-cell budget data should be
saved. If ipakcb is non-zero cell-by-cell budget data will be saved.
(default is None).
iswiobs : int
flag and unit number SWI2 observation output. (default is 0).
options : list of strings
Package options. If 'adaptive' is one of the options adaptive SWI2 time
steps will be used. (default is None).
nsolver : int
DE4 solver is used if nsolver=1. PCG solver is used if nsolver=2.
(default is 1).
iprsol : int
solver print out interval. (default is 0).
mutsol : int
If MUTSOL = 0, tables of maximum head change and residual will be
printed each iteration.
If MUTSOL = 1, only the total number of iterations will be printed.
If MUTSOL = 2, no information will be printed.
If MUTSOL = 3, information will only be printed if convergence fails.
(default is 3).
solver2parameters : dict
only used if nsolver = 2
mxiter : int
maximum number of outer iterations. (default is 100)
iter1 : int
maximum number of inner iterations. (default is 20)
npcond : int
flag used to select the matrix conditioning method. (default is 1).
specify NPCOND = 1 for Modified Incomplete Cholesky.
specify NPCOND = 2 for Polynomial.
zclose : float
is the ZETA change criterion for convergence. (default is 1e-3).
rclose : float
is the residual criterion for convergence. (default is 1e-4)
relax : float
is the relaxation parameter used with NPCOND = 1. (default is 1.0)
nbpol : int
is only used when NPCOND = 2 to indicate whether the estimate of
the upper bound on the maximum eigenvalue is 2.0, or whether the
estimate will be calculated. NBPOL = 2 is used to specify the
value is 2.0; for any other value of NBPOL, the estimate is
calculated. Convergence is generally insensitive to this
parameter. (default is 2).
damp : float
is the steady-state damping factor. (default is 1.)
dampt : float
is the transient damping factor. (default is 1.)
toeslope : float
Maximum slope of toe cells. (default is 0.05)
tipslope : float
Maximum slope of tip cells. (default is 0.05)
alpha : float
fraction of threshold used to move the tip and toe to adjacent empty
cells when the slope exceeds user-specified TOESLOPE and TIPSLOPE
values. (default is None)
beta : float
Fraction of threshold used to move the toe to adjacent non-empty cells
when the surface is below a minimum value defined by the user-specified
TOESLOPE value. (default is 0.1).
napptmx : int
only used if adaptive is True. Maximum number of SWI2 time steps per
MODFLOW time step. (default is 1).
napptmn : int
only used if adaptive is True. Minimum number of SWI2 time steps per
MODFLOW time step. (default is 1).
adptfct : float
is the factor used to evaluate tip and toe thicknesses and control the
number of SWI2 time steps per MODFLOW time step. When the maximum tip
or toe thickness exceeds the product of TOESLOPE or TIPSLOPE the cell
size and ADPTFCT, the number of SWI2 time steps are increased to a
value less than or equal to NADPT. When the maximum tip or toe
thickness is less than the product of TOESLOPE or TIPSLOPE the cell
size and ADPTFCT, the number of SWI2 time steps is decreased in the
next MODFLOW time step to a value greater than or equal to 1. ADPTFCT
must be greater than 0.0 and is reset to 1.0 if NADPTMX is equal to
NADPTMN. (default is 1.0).
nu : array of floats
        if istrat = 1, density of each zone (nsrf + 1 values). if istrat = 0,
density along top of layer, each surface, and bottom of layer
(nsrf + 2 values). (default is 0.025)
zeta : list of floats or list of array of floats [(nlay, nrow, ncol),
(nlay, nrow, ncol)] initial elevations of the active surfaces. The
list should contain an entry for each surface and be of size nsrf.
(default is [0.])
ssz : float or array of floats (nlay, nrow, ncol)
effective porosity. (default is 0.25)
isource : integer or array of integers (nlay, nrow, ncol)
Source type of any external sources or sinks, specified with any
outside package (i.e. WEL Package, RCH Package, GHB Package).
(default is 0).
If ISOURCE > 0 sources and sinks have the same fluid density as the
zone ISOURCE. If such a zone is not present in the cell, sources and
sinks have the same fluid density as the active zone at the top of
the aquifer. If ISOURCE = 0 sources and sinks have the same fluid
density as the active zone at the top of the aquifer. If ISOURCE < 0
sources have the same fluid density as the zone with a number equal
to the absolute value of ISOURCE. Sinks have the same fluid density
as the active zone at the top of the aquifer. This option is useful
for the modeling of the ocean bottom where infiltrating water is
salt, yet exfiltrating water is of the same type as the water at the
top of the aquifer.
obsnam : list of strings
names for nobs observations.
obslrc : list of lists
zero-based [layer, row, column] lists for nobs observations.
extension : string
Filename extension (default is 'swi2')
npln : int
Deprecated - use nsrf instead.
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package and the zeta, cbc, obs output files.
If filenames=None the package name will be created using the model name
and package extension and the output file names will be created using
the model name and output extensions. If a single string is passed the
package will be set to the string and output names will be created
using the model name and zeta, cbc, and observation extensions. To
define the names for all package files (input and output) the length
of the list of strings should be 4. Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Parameters are supported in Flopy only when reading in existing models.
Parameter values are converted to native values in Flopy and the
connection to "parameters" is thus nonexistent.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> swi2 = flopy.modflow.ModflowSwi2(m)
"""
def __init__(self, model, nsrf=1, istrat=1, nobs=0, iswizt=None,
ipakcb=None, iswiobs=0, options=None,
nsolver=1, iprsol=0, mutsol=3,
solver2params={'mxiter': 100, 'iter1': 20, 'npcond': 1,
'zclose': 1e-3, 'rclose': 1e-4, 'relax': 1.0,
'nbpol': 2, 'damp': 1.0, 'dampt': 1.0},
toeslope=0.05, tipslope=0.05, alpha=None, beta=0.1, nadptmx=1,
nadptmn=1, adptfct=1.0, nu=0.025, zeta=[0.0], ssz=0.25,
isource=0, obsnam=None, obslrc=None, npln=None,
extension='swi2', unitnumber=None, filenames=None):
"""
Package constructor.
"""
# set default unit number of one is not specified
if unitnumber is None:
unitnumber = ModflowSwi2.defaultunit()
# set filenames
if filenames is None:
filenames = [None, None, None, None]
elif isinstance(filenames, str):
filenames = [filenames, None, None, None]
elif isinstance(filenames, list):
if len(filenames) < 4:
for idx in range(len(filenames), 4):
filenames.append(None)
# update external file information with zeta output, if necessary
if iswizt is not None:
fname = filenames[1]
model.add_output_file(iswizt, fname=fname, extension='zta',
package=ModflowSwi2.ftype())
else:
iswizt = 0
# update external file information with swi2 cell-by-cell output,
# if necessary
if ipakcb is not None:
fname = filenames[2]
model.add_output_file(ipakcb, fname=fname,
package=ModflowSwi2.ftype())
else:
ipakcb = 0
# Process observations
if nobs != 0:
print('ModflowSwi2: specification of nobs is deprecated.')
nobs = 0
if obslrc is not None:
if isinstance(obslrc, list) or isinstance(obslrc, tuple):
obslrc = np.array(obslrc, dtype=np.int32)
if isinstance(obslrc, np.ndarray):
if obslrc.ndim == 1 and obslrc.size == 3:
obslrc = obslrc.reshape((1, 3))
else:
errmsg = 'ModflowSwi2: obslrc must be a tuple or ' + \
'list of tuples.'
raise Exception(errmsg)
nobs = obslrc.shape[0]
if obsnam is None:
obsnam = []
for n in range(nobs):
obsnam.append('Obs{:03}'.format(n + 1))
else:
if not isinstance(obsnam, list):
obsnam = [obsnam]
if len(obsnam) != nobs:
errmsg = 'ModflowSwi2: obsnam must be a list with a ' + \
'length of {} not {}.'.format(nobs, len(obsnam))
raise Exception(errmsg)
if nobs > 0:
binflag = False
ext = 'zobs.out'
fname = filenames[3]
if iswiobs is not None:
if iswiobs < 0:
binflag = True
ext = 'zobs.bin'
else:
iswiobs = 1053
# update external file information with swi2 observation output,
# if necessary
model.add_output_file(iswiobs, fname=fname, binflag=binflag,
extension=ext, package=ModflowSwi2.ftype())
else:
iswiobs = 0
# Fill namefile items
name = [ModflowSwi2.ftype()]
units = [unitnumber]
extra = ['']
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and
# unit number
Package.__init__(self, model, extension=extension, name=name,
unit_number=units, extra=extra, filenames=fname)
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
self.heading = '# {} package for '.format(self.name[0]) + \
' {}, '.format(model.version_types[model.version]) + \
'generated by Flopy.'
# options
self.fsssopt, self.adaptive = False, False
if isinstance(options, list):
if len(options) < 1:
self.options = None
else:
self.options = options
for o in self.options:
if o.lower() == 'fsssopt':
self.fsssopt = True
elif o.lower() == 'adaptive':
self.adaptive = True
else:
self.options = None
if npln is not None:
print('npln keyword is deprecated. use the nsrf keyword')
nsrf = npln
self.nsrf, self.istrat, self.nobs, self.iswizt, self.iswiobs = nsrf, \
istrat, \
nobs, \
iswizt, \
iswiobs
self.ipakcb = ipakcb
#
self.nsolver, self.iprsol, self.mutsol = nsolver, iprsol, mutsol
#
self.solver2params = solver2params
#
self.toeslope, self.tipslope, self.alpha, self.beta = toeslope, \
tipslope, \
alpha, \
beta
self.nadptmx, self.nadptmn, self.adptfct = nadptmx, nadptmn, adptfct
# Create arrays so that they have the correct size
if self.istrat == 1:
self.nu = Util2d(model, (self.nsrf + 1,), np.float32, nu,
name='nu')
else:
self.nu = Util2d(model, (self.nsrf + 2,), np.float32, nu,
name='nu')
self.zeta = []
for i in range(self.nsrf):
self.zeta.append(Util3d(model, (nlay, nrow, ncol), np.float32,
zeta[i], name='zeta_' + str(i + 1)))
self.ssz = Util3d(model, (nlay, nrow, ncol), np.float32, ssz,
name='ssz')
self.isource = Util3d(model, (nlay, nrow, ncol), np.int32, isource,
name='isource')
#
self.obsnam = obsnam
self.obslrc = obslrc
if nobs != 0:
self.nobs = self.obslrc.shape[0]
#
self.parent.add_package(self)
def write_file(self, check=True, f=None):
"""
Write the package file.
Parameters
----------
check : boolean
Check package data for common errors. (default True)
Returns
-------
None
"""
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
# Open file for writing
if f is None:
f = open(self.fn_path, 'w')
# First line: heading
f.write('{}\n'.format(
self.heading)) # Writing heading not allowed in SWI???
# write dataset 1
f.write('# Dataset 1\n')
f.write(
'{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}'.format(self.nsrf,
self.istrat,
self.nobs,
self.iswizt,
self.ipakcb,
self.iswiobs))
# write SWI2 options
if self.options != None:
for o in self.options:
f.write(' {}'.format(o))
f.write('\n')
# write dataset 2a
f.write('# Dataset 2a\n')
f.write('{:10d}{:10d}{:10d}\n'.format(self.nsolver, self.iprsol,
self.mutsol))
# write dataset 2b
if self.nsolver == 2:
f.write('# Dataset 2b\n')
f.write('{:10d}'.format(self.solver2params['mxiter']))
f.write('{:10d}'.format(self.solver2params['iter1']))
f.write('{:10d}'.format(self.solver2params['npcond']))
f.write('{:14.6g}'.format(self.solver2params['zclose']))
f.write('{:14.6g}'.format(self.solver2params['rclose']))
f.write('{:14.6g}'.format(self.solver2params['relax']))
f.write('{:10d}'.format(self.solver2params['nbpol']))
f.write('{:14.6g}'.format(self.solver2params['damp']))
f.write('{:14.6g}\n'.format(self.solver2params['dampt']))
# write dataset 3a
f.write('# Dataset 3a\n')
f.write('{:14.6g}{:14.6g}'.format(self.toeslope, self.tipslope))
if self.alpha is not None:
f.write('{:14.6g}{:14.6g}'.format(self.alpha, self.beta))
f.write('\n')
# write dataset 3b
if self.adaptive is True:
f.write('# Dataset 3b\n')
f.write('{:10d}{:10d}{:14.6g}\n'.format(self.nadptmx,
self.nadptmn,
self.adptfct))
# write dataset 4
f.write('# Dataset 4\n')
f.write(self.nu.get_file_entry())
# write dataset 5
f.write('# Dataset 5\n')
for isur in range(self.nsrf):
for ilay in range(nlay):
f.write(self.zeta[isur][ilay].get_file_entry())
# write dataset 6
f.write('# Dataset 6\n')
f.write(self.ssz.get_file_entry())
# write dataset 7
f.write('# Dataset 7\n')
f.write(self.isource.get_file_entry())
# write dataset 8
if self.nobs > 0:
f.write('# Dataset 8\n')
for i in range(self.nobs):
# f.write(self.obsnam[i] + 3 * '%10i' % self.obslrc + '\n')
f.write('{} '.format(self.obsnam[i]))
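                # obslrc holds zero-based (layer, row, column); the SWI2 file expects one-based indices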
for v in self.obslrc[i, :]:
f.write('{:10d}'.format(v + 1))
f.write('\n')
# close swi2 file
f.close()
@staticmethod
def load(f, model, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
swi2 : ModflowSwi2 object
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> swi2 = flopy.modflow.ModflowSwi2.load('test.swi2', m)
"""
if model.verbose:
sys.stdout.write('loading swi2 package file...\n')
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
# dataset 0 -- header
while True:
line = f.readline()
if line[0] != '#':
break
# determine problem dimensions
nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
# --read dataset 1
if model.verbose:
sys.stdout.write(' loading swi2 dataset 1\n')
t = line.strip().split()
nsrf = int(t[0])
istrat = int(t[1])
nobs = int(t[2])
        iswizt = 0
        if int(t[3]) > 0:
            model.add_pop_key_list(int(t[3]))
            iswizt = int(t[3])
if int(t[4]) > 0:
model.add_pop_key_list(int(t[4]))
ipakcb = int(t[4])
else:
ipakcb = 0
iswiobs = 0
if int(t[5]) > 0:
model.add_pop_key_list(int(t[5]))
iswiobs = int(t[5])
options = []
adaptive = False
for idx in range(6, len(t)):
if '#' in t[idx]:
break
options.append(t[idx])
if 'adaptive' in t[idx].lower():
adaptive = True
# read dataset 2a
if model.verbose:
sys.stdout.write(' loading swi2 dataset 2a\n')
while True:
line = f.readline()
if line[0] != '#':
break
t = line.strip().split()
nsolver = int(t[0])
iprsol = int(t[1])
mutsol = int(t[2])
# read dataset 2b
solver2params = {}
if nsolver == 2:
if model.verbose:
sys.stdout.write(' loading swi2 dataset 2b\n')
while True:
line = f.readline()
if line[0] != '#':
break
t = line.strip().split()
solver2params['mxiter'] = int(t[0])
solver2params['iter1'] = int(t[1])
solver2params['npcond'] = int(t[2])
solver2params['zclose'] = float(t[3])
solver2params['rclose'] = float(t[4])
solver2params['relax'] = float(t[5])
solver2params['nbpol'] = int(t[6])
solver2params['damp'] = float(t[7])
solver2params['dampt'] = float(t[8])
# read dataset 3a
if model.verbose:
sys.stdout.write(' loading swi2 dataset 3a\n')
while True:
line = f.readline()
if line[0] != '#':
break
t = line.strip().split()
toeslope = float(t[0])
tipslope = float(t[1])
alpha = None
beta = 0.1
if len(t) > 2:
try:
alpha = float(t[2])
beta = float(t[3])
except:
if model.verbose:
print(' explicit alpha and beta in file')
# read dataset 3b
nadptmx, nadptmn, adptfct = None, None, None
if adaptive:
if model.verbose:
sys.stdout.write(' loading swi2 dataset 3b\n')
while True:
line = f.readline()
if line[0] != '#':
break
t = line.strip().split()
nadptmx = int(t[0])
nadptmn = int(t[1])
adptfct = float(t[2])
# read dataset 4
if model.verbose:
print(' loading nu...')
if istrat == 1:
nnu = nsrf + 1
else:
nnu = nsrf + 2
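        # skip any comment lines, then rewind so Util2d.load starts at the first data record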
while True:
ipos = f.tell()
line = f.readline()
if line[0] != '#':
f.seek(ipos)
break
nu = Util2d.load(f, model, (nnu,), np.float32, 'nu',
ext_unit_dict)
# read dataset 5
if model.verbose:
print(' loading initial zeta surfaces...')
while True:
ipos = f.tell()
line = f.readline()
if line[0] != '#':
f.seek(ipos)
break
zeta = []
for n in range(nsrf):
ctxt = 'zeta_surf{:02d}'.format(n + 1)
zeta.append(Util3d.load(f, model, (nlay, nrow, ncol),
np.float32, ctxt, ext_unit_dict))
# read dataset 6
if model.verbose:
print(' loading initial ssz...')
while True:
ipos = f.tell()
line = f.readline()
if line[0] != '#':
f.seek(ipos)
break
ssz = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
'ssz', ext_unit_dict)
# read dataset 7
if model.verbose:
print(' loading initial isource...')
while True:
ipos = f.tell()
line = f.readline()
if line[0] != '#':
f.seek(ipos)
break
isource = Util3d.load(f, model, (nlay, nrow, ncol), np.int32,
'isource', ext_unit_dict)
# read dataset 8
obsname = []
obslrc = []
if nobs > 0:
if model.verbose:
print(' loading observation locations...')
while True:
line = f.readline()
if line[0] != '#':
break
for i in range(nobs):
if i > 0:
try:
line = f.readline()
except:
break
t = line.strip().split()
obsname.append(t[0])
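                # observation locations are one-based (layer, row, column) in the file; store zero-based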
kk = int(t[1]) - 1
ii = int(t[2]) - 1
jj = int(t[3]) - 1
obslrc.append([kk, ii, jj])
nobs = len(obsname)
# determine specified unit number
unitnumber = None
filenames = [None, None, None, None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = \
model.get_ext_dict_attr(ext_unit_dict,
filetype=ModflowSwi2.ftype())
if iswizt > 0:
iu, filenames[1] = \
model.get_ext_dict_attr(ext_unit_dict, unit=iswizt)
if ipakcb > 0:
iu, filenames[2] = \
model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
if abs(iswiobs) > 0:
iu, filenames[3] = \
model.get_ext_dict_attr(ext_unit_dict, unit=abs(iswiobs))
# create swi2 instance
swi2 = ModflowSwi2(model, nsrf=nsrf, istrat=istrat,
iswizt=iswizt, ipakcb=ipakcb,
iswiobs=iswiobs, options=options,
nsolver=nsolver, iprsol=iprsol, mutsol=mutsol,
solver2params=solver2params,
toeslope=toeslope, tipslope=tipslope, alpha=alpha,
beta=beta,
nadptmx=nadptmx, nadptmn=nadptmn, adptfct=adptfct,
nu=nu, zeta=zeta, ssz=ssz, isource=isource,
obsnam=obsname, obslrc=obslrc,
unitnumber=unitnumber, filenames=filenames)
# return swi2 instance
return swi2
@staticmethod
def ftype():
return 'SWI2'
@staticmethod
def defaultunit():
return 29
| 38.906648 | 94 | 0.501072 |
7944fa05774966b34f9663062b3dcb45a8f5fac7 | 15,975 | py | Python | process.py | constantinius/open-science-catalog-builder | 328855438e24086e1fd445a1fd8c8280b5ccf4da | [
"MIT"
] | null | null | null | process.py | constantinius/open-science-catalog-builder | 328855438e24086e1fd445a1fd8c8280b5ccf4da | [
"MIT"
] | null | null | null | process.py | constantinius/open-science-catalog-builder | 328855438e24086e1fd445a1fd8c8280b5ccf4da | [
"MIT"
] | null | null | null | import csv
import json
from datetime import datetime
import re
import os
import shutil
from typing import List, Optional, TypedDict
import pystac
import pystac.extensions.scientific
import pystac.summaries
import pystac.layout
import click
from dateutil.parser import parse
from pygeoif import geometry
from slugify import slugify
from lxml.builder import ElementMaker
from lxml import etree
RE_ID_REP = re.compile(r'[^A-Za-z0-9\- ]+')
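# pystac.Item keeps a single parent collection; this subclass accumulates collection
# links instead, so one project item can be listed under several theme collections.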
class MultiCollectionItem(pystac.Item):
def set_collection(self, collection: Optional[pystac.Collection]) -> "Item":
# self.remove_links(pystac.RelType.COLLECTION)
self.collection_id = None
if collection is not None:
self.add_link(pystac.Link.collection(collection))
self.collection_id = collection.id
return self
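# Return the nesting depth of a list/tuple structure (0 for scalar values).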
def get_depth(maybe_list):
if isinstance(maybe_list, (list, tuple)):
return get_depth(maybe_list[0]) + 1
return 0
def get_themes(obj):
return [
obj[f"Theme{i}"]
for i in range(1, 7)
if obj[f"Theme{i}"]
]
def parse_date(source):
if not source:
return None
year, month = source.split(".")
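    # source strings look like "YYYY.MM"; note the +1 treats the month field as
    # zero-based, so a literal ".12" would raise ValueError here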
return datetime(int(year), int(month) + 1, 1)
def get_geometry(source):
geom = None
if not source:
pass
elif source.startswith("Multipolygon"):
# geom = geometry.from_wkt(source.replace("Multipolygon", "MULTIPOLYGON"))
# TODO: figure out a way to parse this
pass
else:
try:
raw_geom = json.loads(source)
except ValueError:
print(source)
return None
depth = get_depth(raw_geom)
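        # nesting depth 1 -> a point's coordinate list; depth 3 -> [shell, hole1, ...] polygon rings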
if depth == 1:
geom = geometry.Point(*raw_geom)
elif depth == 3:
shell, *holes = raw_geom
geom = geometry.Polygon(shell, holes or None)
if geom:
return geom.__geo_interface__
return None
def product_to_item(obj):
properties = {
"start_datetime": obj["Start"] and parse_date(obj["Start"]).isoformat() or None,
"end_datetime": obj["End"] and parse_date(obj["End"]).isoformat() or None,
"title": obj["Product"],
"description": obj["Description"],
"mission": obj["EO_Missions"],
"osc:project": obj["Project"],
"osc:themes": get_themes(obj),
"osc:variable": obj["Variable"],
"osc:status": obj["Status"], # TODO maybe use a STAC field
"osc:region": obj["Region"],
"osc:type": "Product",
# scientific extension DOI
}
item = pystac.Item(
f"product-{obj['ID']}",
get_geometry(obj["Polygon"]),
None,
obj["Start"] and parse_date(obj["Start"]) or None,
properties=properties,
href=f"products/product-{obj['ID']}.json"
)
item.add_link(
pystac.Link(
pystac.RelType.VIA,
obj["Website"]
)
)
item.add_link(
pystac.Link(
pystac.RelType.VIA,
obj["Access"]
)
)
item.add_link(
pystac.Link(
pystac.RelType.VIA,
obj["Documentation"]
)
)
sci_ext = pystac.extensions.scientific.ScientificExtension.ext(item, True)
sci_ext.apply(obj["DOI"])
return item
def project_to_item(obj):
properties = {
"start_datetime": parse(obj["Start_Date_Project"]).isoformat(),
"end_datetime": parse(obj["End_Date_Project"]).isoformat(),
"title": obj["Project_Name"],
"description": obj["Short_Description"],
"osc:themes": get_themes(obj),
"osc:status": obj["Status"], # TODO maybe use a STAC field
"osc:consortium": obj["Consortium"],
"osc:technical_officer": {
"name": obj["TO"],
"email": obj["TO_E-mail"],
},
"osc:type": "Project",
}
item = MultiCollectionItem(
f"project-{obj['Project_ID']}",
None,
None,
parse(obj["Start_Date_Project"]),
properties=properties,
href=f"projects/project-{obj['Project_ID']}.json"
)
item.add_link(
pystac.Link(
pystac.RelType.VIA,
obj["Website"]
)
)
item.add_link(
pystac.Link(
pystac.RelType.VIA,
obj["Eo4Society_link"]
)
)
return item
def theme_to_collection(obj):
identifier = obj["theme"].strip()
collection = pystac.Collection(
identifier,
obj["description"],
extent=pystac.Extent(
pystac.SpatialExtent([-180, -90, 180, 90]),
pystac.TemporalExtent([[None, None]])
),
href=f"themes/{identifier}.json"
)
collection.extra_fields = {
"osc:type": "Theme",
}
collection.add_link(
pystac.Link(
pystac.RelType.VIA,
obj["link"]
)
)
return collection
def variable_to_collection(obj):
identifier = obj["variable"].strip()
collection = pystac.Collection(
identifier,
obj["variable description"],
extent=pystac.Extent(
pystac.SpatialExtent([-180, -90, 180, 90]),
pystac.TemporalExtent([[None, None]])
),
href=f"variables/{identifier}.json"
)
collection.extra_fields = {
"osc:theme": obj["theme"],
"osc:type": "Variable",
}
collection.add_link(
pystac.Link(
pystac.RelType.VIA,
obj["link"]
)
)
return collection
@click.command()
@click.argument('variables_file', type=click.File('r'))
@click.argument('themes_file', type=click.File('r'))
@click.argument('projects_file', type=click.File('r'))
@click.argument('products_file', type=click.File('r'))
@click.option("--out-dir", "-o", default="dist", type=str)
def main(variables_file, themes_file, projects_file, products_file, out_dir):
# with open("Variables.csv") as f:
variables = list(csv.DictReader(variables_file))
# with open("Themes.csv") as f:
themes = list(csv.DictReader(themes_file))
# with open("Projects-2021-12-20.csv") as f:
projects = list(csv.DictReader(projects_file))
# with open("Products-2021-12-20.csv") as f:
products = list(csv.DictReader(products_file))
catalog = pystac.Catalog(
'OSC-Catalog',
'OSC-Catalog',
href="catalog.json"
)
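    # Assemble the hierarchy: theme collections hold variable collections and project
    # items; variable collections hold the product items.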
theme_collections = [
theme_to_collection(theme)
for theme in themes
]
theme_map = {
slugify(coll.id): coll
for coll in theme_collections
}
variable_collections = [
variable_to_collection(variable)
for variable in variables
]
variable_map = {
slugify(coll.id): coll
for coll in variable_collections
}
product_items = [
product_to_item(product)
for product in products
]
# TODO: figure out what to do with projects
project_items = [
project_to_item(project)
for project in projects
]
# place variable collections into theme collections
for coll in variable_collections:
theme_coll = theme_map[slugify(coll.extra_fields["osc:theme"])]
theme_coll.add_child(coll)
# put products into variable collections
for item in product_items:
try:
variable_coll = variable_map[slugify(item.properties["osc:variable"])]
        except KeyError:
            print(f"Missing variable {item.properties['osc:variable']}")
            continue  # skip products whose variable has no matching collection
        variable_coll.add_item(item)
# put projects into their respective theme collections
for item in project_items:
for theme in item.properties["osc:themes"]:
theme_map[slugify(theme)].add_item(item)
catalog.add_children(theme_collections)
# catalog.add_items(project_items)
# calculate summary information for variable and themes
for coll in variable_collections:
years = set()
i = 0
for i, item in enumerate(coll.get_items(), start=1):
if item.properties["start_datetime"]:
years.add(
parse(item.properties["start_datetime"]).year
# int(item.properties["start_datetime"].split(".")[0])
)
# TODO: use summaries instead?
coll.extra_fields["osc:years"] = sorted(years)
coll.extra_fields["osc:numberOfProducts"] = i
for coll in theme_collections:
years = set()
number_of_products = 0
i = 0
for i, sub_coll in enumerate(coll.get_collections(), start=1):
years.update(sub_coll.extra_fields["osc:years"])
number_of_products += sub_coll.extra_fields["osc:numberOfProducts"]
coll.extra_fields["osc:years"] = sorted(years)
coll.extra_fields["osc:numberOfProducts"] = number_of_products
coll.extra_fields["osc:numberOfVariables"] = i
        i = 0  # reset the counter; a theme with no projects must not reuse the variable count
        for i, item in enumerate(coll.get_items(), start=1):
            pass
        coll.extra_fields["osc:numberOfProjects"] = i
years = set()
number_of_products = 0
number_of_variables = 0
i = 0
for i, coll in enumerate(theme_collections, start=1):
years.update(coll.extra_fields["osc:years"])
number_of_products += coll.extra_fields["osc:numberOfProducts"]
number_of_variables += coll.extra_fields["osc:numberOfVariables"]
catalog.extra_fields = {
"osc:numberOfProducts": number_of_products,
"osc:numberOfProjects": len(project_items),
"osc:numberOfVariables": number_of_variables,
"osc:numberOfThemes": i,
"osc:years": sorted(years),
}
metrics = {
"id": catalog.id,
"summary": {
"years": catalog.extra_fields["osc:years"],
"numberOfProducts": catalog.extra_fields["osc:numberOfProducts"],
"numberOfProjects": catalog.extra_fields["osc:numberOfProjects"],
"numberOfVariables": catalog.extra_fields["osc:numberOfVariables"],
"numberOfThemes": catalog.extra_fields["osc:numberOfThemes"],
},
"themes": [
{
"name": theme_coll.id,
"description": theme_coll.description,
"image": "...",
"website": theme_coll.get_single_link(pystac.RelType.VIA).get_href(),
# "technicalOfficer": theme_coll.extra_fields["osc:technical_officer"]["name"],
"summary": {
"years": theme_coll.extra_fields["osc:years"],
"numberOfProducts": theme_coll.extra_fields["osc:numberOfProducts"],
"numberOfProjects": theme_coll.extra_fields["osc:numberOfProjects"],
"numberOfVariables": theme_coll.extra_fields["osc:numberOfVariables"],
},
"variables": [
{
"name": var_coll.id,
"description": var_coll.description,
"summary": {
"years": var_coll.extra_fields["osc:years"],
"numberOfProducts": var_coll.extra_fields["osc:numberOfProducts"],
}
}
for var_coll in theme_coll.get_collections()
]
}
for theme_coll in catalog.get_collections()
]
}
# create codelists.xml
nsmap = {
"gmd": "http://www.isotc211.org/2005/gmd",
"gmx": "http://www.isotc211.org/2005/gmx",
"gco": "http://www.isotc211.org/2005/gco",
"gml": "http://www.opengis.net/gml/3.2",
"xlink": "http://www.w3.org/1999/xlink",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
}
GMX = ElementMaker(namespace=nsmap["gmx"], nsmap=nsmap)
GML = ElementMaker(namespace=nsmap["gml"], nsmap=nsmap)
GMD = ElementMaker(namespace=nsmap["gmd"], nsmap=nsmap)
GCO = ElementMaker(namespace=nsmap["gco"], nsmap=nsmap)
codelist = GMX(
"CT_CodelistCatalogue",
GMX(
"name",
GCO("CharacterString", "OSC_Codelists")
),
GMX(
"scope",
GCO("CharacterString", "Codelists for Open Science Catalog")
),
GMX(
"fieldOfApplication",
GCO("CharacterString", "Open Science Catalog")
),
GMX(
"versionNumber",
GCO("CharacterString", "1.0.0")
),
GMX(
"versionDate",
GCO("Date", "2022-02-05")
),
GMX(
"language",
GMD("LanguageCode", "English", codeList="#LanguageCode", codeListValue="eng"),
),
GMX(
"characterSet",
GMD("MD_CharacterSetCode", "utf8", codeList="#MD_CharacterSetCode", codeListValue="utf8"),
),
# actual codelists for themes, variables
GMX(
"codeListItem",
*[
GMX(
"codeEntry",
GMX(
"CodeDefinition",
GML("identifier", f"OSC_Theme_{theme_coll.id}", codeSpace="OSC"),
GML("description", theme_coll.description),
GML(
"descriptionReference",
**{
f"{{{nsmap['xlink']}}}type": "simple",
f"{{{nsmap['xlink']}}}href": theme_coll.get_single_link(pystac.RelType.VIA).href,
}
),
**{f"{{{nsmap['gml']}}}id": f"OSC_Theme_{theme_coll.id}"}
)
)
for theme_coll in theme_collections
]
),
GMX(
"codeListItem",
*[
GMX(
"codeEntry",
GMX(
"CodeDefinition",
GML("identifier", f"OSC_Variable_{variable_coll.id}", codeSpace="OSC"),
GML("description", variable_coll.description),
GML(
"descriptionReference",
**{
f"{{{nsmap['xlink']}}}type": "simple",
f"{{{nsmap['xlink']}}}href": variable_coll.get_single_link(pystac.RelType.VIA).href,
}
),
**{f"{{{nsmap['gml']}}}id": f"OSC_Variable_{variable_coll.id}"}
)
)
for variable_coll in variable_collections
]
),
# TODO: add EO-Missions?
)
os.makedirs(out_dir)
os.chdir(out_dir)
with open("metrics.json", "w") as f:
json.dump(metrics, f, indent=4)
etree.ElementTree(codelist).write("codelists.xml", pretty_print=True)
catalog.add_link(pystac.Link(pystac.RelType.ALTERNATE, "./metrics.json", "application/json"))
catalog.add_link(pystac.Link(pystac.RelType.ALTERNATE, "./codelists.xml", "application/xml"))
# catalog.describe(True)
# catalog.save(pystac.CatalogType.SELF_CONTAINED, dest_href='dist/')
# create the output directory and switch there to allow a clean build
catalog.normalize_and_save(
"",
# pystac.CatalogType.ABSOLUTE_PUBLISHED,
pystac.CatalogType.SELF_CONTAINED,
# strategy=pystac.layout.TemplateLayoutStrategy(
# collection_template="${osc:type}s/${id}.json",
# item_template="${osc:type}s/${id}.json"
# )
strategy=pystac.layout.CustomLayoutStrategy(
collection_func=lambda coll, parent_dir, is_root: f"{coll.extra_fields['osc:type'].lower()}s/{slugify(coll.id)}.json",
item_func=lambda item, parent_dir: f"{item.properties['osc:type'].lower()}s/{item.id}.json",
)
)
if __name__ == "__main__":
main()
| 31.44685 | 130 | 0.561189 |
7944fa25238e459c6599c7740f6cf7b5954ff392 | 568 | py | Python | tests/test_nft_analyser.py | gsk-gagan/nft_analyser | 7a16d18500fcd48dce22f86251eaf6be6a272141 | [
"MIT"
] | null | null | null | tests/test_nft_analyser.py | gsk-gagan/nft_analyser | 7a16d18500fcd48dce22f86251eaf6be6a272141 | [
"MIT"
] | null | null | null | tests/test_nft_analyser.py | gsk-gagan/nft_analyser | 7a16d18500fcd48dce22f86251eaf6be6a272141 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Tests for `nft_analyser` package."""
import pytest
from nft_analyser import nft_analyser
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
| 22.72 | 78 | 0.714789 |
7944fb37d8d73b94740f03b19ef318af6384b6ac | 2,347 | py | Python | cnn/eval-EXP-20200415-092238/scripts/break_images.py | jatinarora2409/darts | d360a34cef970bcd42933e31eab0f7a64120c9da | [
"Apache-2.0"
] | null | null | null | cnn/eval-EXP-20200415-092238/scripts/break_images.py | jatinarora2409/darts | d360a34cef970bcd42933e31eab0f7a64120c9da | [
"Apache-2.0"
] | null | null | null | cnn/eval-EXP-20200415-092238/scripts/break_images.py | jatinarora2409/darts | d360a34cef970bcd42933e31eab0f7a64120c9da | [
"Apache-2.0"
] | null | null | null |
from PIL import Image
import os
import sys
import numpy as np
from os import listdir
from os.path import isfile, join
from torchvision.datasets import VisionDataset
import cv2
import torchvision.transforms as transforms
break_height_width = 32
jump = 512
root_main='../data/mnt/d/SIDD_Medium_Srgb/Data/'
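# Walk the SIDD dataset: for every GT/NOISY image pair, crop a 32x32 patch on a
# 512-pixel grid (break_height_width / jump above), save the numbered crops beside
# the originals, then delete the full-size pair.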
for root, directories, filenames in os.walk(root_main):
for filename in filenames:
print(filename)
if("NOISY" in filename or "DS_Store" in filename):
            continue
filename_NOISY =filename.replace("GT" ,"NOISY" ,1)
label_file = os.path.join(root, filename)
input_file = os.path.join(root ,filename_NOISY)
print("input_file: " +input_file)
print("label_file: " +label_file)
img = Image.open(input_file).convert('RGB')
target = Image.open(label_file).convert('RGB')
width, height = img.size
current_start_height = 0
current_start_width = 0
count = 1
while(current_start_height+jump<height):
while(current_start_width+jump<width):
left = current_start_width
right = current_start_width+break_height_width
top = current_start_height
bottom = current_start_height+break_height_width
im1 = img.crop((left, top, right, bottom))
target1 = target.crop((left, top, right, bottom))
filenames = filename.split(".")
filename_start = filenames[0]
filename_end = filenames[1]
filename_new = filename_start+"_"+str(count)+"."+filename_end
filenames_NOISY = filename_NOISY.split(".")
filename_start_NOISY = filenames_NOISY[0]
filename_end_NOISY = filenames_NOISY[1]
filename_NOISY_new = filename_start_NOISY + "_" + str(count) + "." + filename_end_NOISY
#im1.show()
#target1.show()
im1.save(os.path.join(root, filename_NOISY_new))
target1.save(os.path.join(root, filename_new ))
count = count+1
current_start_width = current_start_width+jump
current_start_height = current_start_height+jump
current_start_width=0
os.remove(label_file)
os.remove(input_file)
| 39.116667 | 103 | 0.620366 |
7944fc014967d9a3fafb91ed658f0e7bd3db8c32 | 209 | py | Python | tests/conftest.py | stepansnigirev/lnbits | 82731dc901780b959d6ebecc4f61be137c8d2884 | [
"MIT"
] | 258 | 2020-04-27T21:36:21.000Z | 2021-10-30T23:24:48.000Z | tests/conftest.py | stepansnigirev/lnbits | 82731dc901780b959d6ebecc4f61be137c8d2884 | [
"MIT"
] | 283 | 2020-04-27T17:23:12.000Z | 2021-11-01T10:07:20.000Z | tests/conftest.py | stepansnigirev/lnbits | 82731dc901780b959d6ebecc4f61be137c8d2884 | [
"MIT"
] | 109 | 2020-04-28T06:00:17.000Z | 2021-10-13T02:48:28.000Z | import pytest
from lnbits.app import create_app
@pytest.fixture
async def client():
app = create_app()
app.config["TESTING"] = True
async with app.test_client() as client:
yield client
| 16.076923 | 43 | 0.684211 |
7944ffb46c06783739818db89100c0186e81bb42 | 1,764 | py | Python | migrations/versions/9d0c25ad18b3_added_the_classes_comment_and_pitch.py | GeGe-K/Pitcher-App | 4a970b37fe0fcd63ad3853a4f764c410a4acb640 | [
"MIT"
] | null | null | null | migrations/versions/9d0c25ad18b3_added_the_classes_comment_and_pitch.py | GeGe-K/Pitcher-App | 4a970b37fe0fcd63ad3853a4f764c410a4acb640 | [
"MIT"
] | null | null | null | migrations/versions/9d0c25ad18b3_added_the_classes_comment_and_pitch.py | GeGe-K/Pitcher-App | 4a970b37fe0fcd63ad3853a4f764c410a4acb640 | [
"MIT"
] | null | null | null | """added the classes comment and pitch.
Revision ID: 9d0c25ad18b3
Revises: ab0e2a2f42c6
Create Date: 2018-11-19 17:10:30.207222
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9d0c25ad18b3'
down_revision = 'ab0e2a2f42c6'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('pitches',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=255), nullable=True),
sa.Column('content', sa.String(length=255), nullable=True),
sa.Column('category', sa.String(), nullable=True),
sa.Column('password_hash', sa.String(length=255), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('date', sa.String(length=255), nullable=True),
sa.Column('time', sa.String(length=255), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('pitch_id', sa.Integer(), nullable=True),
sa.Column('comment', sa.String(length=255), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('date', sa.String(length=255), nullable=True),
sa.Column('time', sa.String(length=255), nullable=True),
sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('comments')
op.drop_table('pitches')
# ### end Alembic commands ###
| 33.923077 | 69 | 0.671202 |
7944ffda4c268a7bf06635d90ec57d873341755f | 332 | py | Python | flask_mongodb/core/mixins.py | juanmanuel96/flask-mongodb | 4c4002a45294a55ecea67f4cf4f2e6a93fedc12a | [
"BSD-2-Clause"
] | null | null | null | flask_mongodb/core/mixins.py | juanmanuel96/flask-mongodb | 4c4002a45294a55ecea67f4cf4f2e6a93fedc12a | [
"BSD-2-Clause"
] | null | null | null | flask_mongodb/core/mixins.py | juanmanuel96/flask-mongodb | 4c4002a45294a55ecea67f4cf4f2e6a93fedc12a | [
"BSD-2-Clause"
] | null | null | null | import typing as t
class ModelMixin:
@property
def model(self):
from flask_mongodb.models import CollectionModel
self._model: t.Type[CollectionModel]
return self._model
class InimitableObject:
def __copy__(self):
return None
def __deepcopy__(self, memo):
return None
| 18.444444 | 56 | 0.659639 |
7945006e2566ed0ef7d7e42de1076c2f4f0ab846 | 850 | py | Python | ecfactory/mnt_curves/mnt_curves_examples.py | weikengchen/ecfactory | f509c00b7cf66f4b8dbe9540599a4c95b9742bfd | [
"MIT"
] | 39 | 2016-06-09T13:47:57.000Z | 2022-02-10T14:06:20.000Z | ecfactory/mnt_curves/mnt_curves_examples.py | frevson/ecfactory-A-SageMath-Library-for-Constructing-Elliptic-Curves | f509c00b7cf66f4b8dbe9540599a4c95b9742bfd | [
"MIT"
] | 3 | 2019-04-26T14:15:34.000Z | 2021-02-03T09:21:37.000Z | ecfactory/mnt_curves/mnt_curves_examples.py | frevson/ecfactory-A-SageMath-Library-for-Constructing-Elliptic-Curves | f509c00b7cf66f4b8dbe9540599a4c95b9742bfd | [
"MIT"
] | 13 | 2017-09-27T08:08:49.000Z | 2022-03-28T12:11:20.000Z | import ecfactory.mnt_curves as mnt
from ecfactory.utils import print_curve
# Example (MNT curve with k = 6 and D = -19)
curves = mnt.make_curve(6,-19)
q,t,r,k,D = curves[0]
print_curve(q,t,r,k,D)
# Example (MNT curve with k = 3 and D = -19)
curves = mnt.make_curve(3, -19)
q,t,r,k,D = curves[0]
print_curve(q,t,r,k,D)
# Enumerate through all MNT curves with k = 6 and -D < 200000
f = open("mnt6_enumeration.csv", 'w')
f.write('q,t,r,k,D\n')
D = -11
while -D < 200000:
try:
curves = mnt.make_curve(6,D)
if (len(curves) > 0):
for c in curves:
for i in range(0, len(c)):
if i != len(c) - 1:
f.write(str(c[i]) + ',')
else:
f.write(str(c[i]) + '\n')
except AssertionError as e:
pass
D -= 8
f.close()
| 26.5625 | 61 | 0.528235 |
794500c7a8b57298db9258336d55054f5abbbcee | 2,357 | py | Python | tests/test_tuple.py | loyada/typed-py | 8f946ed0cddb38bf7fd463a4c8111a592ccae31a | [
"MIT"
] | 14 | 2018-02-14T13:28:47.000Z | 2022-02-12T08:03:21.000Z | tests/test_tuple.py | loyada/typed-py | 8f946ed0cddb38bf7fd463a4c8111a592ccae31a | [
"MIT"
] | 142 | 2017-11-22T14:02:33.000Z | 2022-03-23T21:26:29.000Z | tests/test_tuple.py | loyada/typed-py | 8f946ed0cddb38bf7fd463a4c8111a592ccae31a | [
"MIT"
] | 4 | 2017-12-14T16:46:45.000Z | 2021-12-15T16:33:31.000Z | from pytest import raises
from typedpy import Structure, Tuple, Number, String, Integer, Float
class Example(Structure):
_additionalProperties = True
_required = []
# array support, similar to json schema
a = Tuple(uniqueItems=True, items=[String, String])
b = Tuple(items=[String, String, Number(maximum=10)])
c = Tuple[Integer, String, Float]
d = Tuple[Integer]
def test_wrong_type_for_tuple_err():
with raises(TypeError) as excinfo:
Example(a=2)
assert "a: Got 2; Expected <class 'tuple'>" in str(excinfo.value)
def test_wrong_type_for_tuple_items_err1():
with raises(TypeError) as excinfo:
Example(a=("aa", 2))
assert "a_1: Got 2; Expected a string" in str(excinfo.value)
def test_wrong_type_for_tuple_items_err2():
with raises(TypeError) as excinfo:
Example(c=(1, "aa", 2))
assert "c_2: Expected <class 'float'>" in str(excinfo.value)
def test_wrong_value_for_tuple_item_err():
with raises(ValueError) as excinfo:
Example(b=("aa", "bb", 92))
assert "b_2: Got 92; Expected a maximum of 10" in str(excinfo.value)
def test_wrong_length_for_tuple_items_err():
with raises(ValueError) as excinfo:
Example(a=("aa",))
assert "a: Got ('aa',); Expected a tuple of length 2" in str(excinfo.value)
def test_non_unique_items_err():
with raises(ValueError) as excinfo:
Example(a=("aa", "aa"))
assert "a: Got ('aa', 'aa'); Expected unique items" in str(excinfo.value)
def test_unique_items_valid():
assert Example(a=("aa", "bb")).a == ("aa", "bb")
def test_bad_items_definition_err():
with raises(TypeError) as excinfo:
Tuple(items=str)
assert "Expected a list/tuple of Fields or a single Field" in str(excinfo.value)
def test_simplified_definition_valid_assignment():
assert Example(c=(1, "bb", 0.5)).c[1:] == ("bb", 0.5)
def test_wrong_type_in_items_definition_err():
with raises(TypeError) as excinfo:
Tuple(items=[int, String])
assert "Expected a Field class or instance" in str(excinfo.value)
def test_single_type_tuple():
e = Example(d=(1, 2))
assert e.d[0] == 1
assert e.d == (1, 2)
def test_single_type_tuple_err1():
with raises(TypeError) as excinfo:
Example(d=(3, 2, "asdasd"))
assert "d_2: Expected <class 'int'>" in str(excinfo.value)
| 28.743902 | 84 | 0.673738 |
7945022240c284624b9b2a7a81f076ce8aea6a02 | 257 | py | Python | tests/conftest.py | dzubke/ship_detector | 28212be681914ad739544f6b849152f502289ff3 | [
"MIT"
] | 1 | 2019-11-28T02:39:31.000Z | 2019-11-28T02:39:31.000Z | tests/conftest.py | dzubke/ship_detector | 28212be681914ad739544f6b849152f502289ff3 | [
"MIT"
] | null | null | null | tests/conftest.py | dzubke/ship_detector | 28212be681914ad739544f6b849152f502289ff3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dummy conftest.py for ship_detection.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
https://pytest.org/latest/plugins.html
"""
# import pytest
| 21.416667 | 60 | 0.661479 |
7945022ea2833563a0a22f7a886f887ad7eb4672 | 414 | py | Python | dnslib/dns_reply.py | dineshkumar2509/learning-python | e8af11ff0b396da4c3f2cfe21d14131bae4b2adb | [
"MIT"
] | 86 | 2015-06-13T16:53:55.000Z | 2022-03-24T20:56:42.000Z | dnslib/dns_reply.py | pei-zheng-yi/learning-python | 55e350dfe44cf04f7d4408e76e72d2f467bd42ce | [
"MIT"
] | 9 | 2015-05-27T07:52:44.000Z | 2022-03-29T21:52:40.000Z | dnslib/dns_reply.py | pei-zheng-yi/learning-python | 55e350dfe44cf04f7d4408e76e72d2f467bd42ce | [
"MIT"
] | 124 | 2015-12-10T01:17:18.000Z | 2021-11-08T04:03:38.000Z |
#!/usr/bin/env python
from dnslib import *
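# Build a reply to an ANY query for abc.com, attach A/AAAA answers, and verify the
# packed message round-trips through DNSRecord.parse (note: Python 2 print syntax).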
q = DNSRecord(q=DNSQuestion("abc.com",QTYPE.ANY))
a = q.reply()
a.add_answer(RR("abc.com",QTYPE.A,rdata=A("1.2.3.4"),ttl=60))
print str(DNSRecord.parse(a.pack())) == str(a)
print a
a.add_answer(RR("xxx.abc.com",QTYPE.A,rdata=A("1.2.3.4")))
a.add_answer(RR("xxx.abc.com",QTYPE.AAAA,rdata=AAAA("1234:5678::1")))
print str(DNSRecord.parse(a.pack())) == str(a)
print a
| 23 | 69 | 0.664251 |