Dataset schema. Each column is listed with its dtype, its observed value or length range, and whether null values (⌀) occur. In the example rows that follow, the repo path, repo name, head hexsha and licenses are identical across the max_stars, max_issues and max_forks column groups, so they are listed once per row.

| Column | Dtype | Values / lengths | Nulls (⌀) |
|---|---|---|---|
| hexsha | string | lengths 40–40 | no |
| size | int64 | 1 – 1.03M | no |
| ext | string | 10 classes | no |
| lang | string | 1 class | no |
| max_stars_repo_path | string | lengths 3–239 | no |
| max_stars_repo_name | string | lengths 5–130 | no |
| max_stars_repo_head_hexsha | string | lengths 40–78 | no |
| max_stars_repo_licenses | sequence | lengths 1–10 | no |
| max_stars_count | int64 | 1 – 191k | yes |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 | yes |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 | yes |
| max_issues_repo_path | string | lengths 3–239 | no |
| max_issues_repo_name | string | lengths 5–130 | no |
| max_issues_repo_head_hexsha | string | lengths 40–78 | no |
| max_issues_repo_licenses | sequence | lengths 1–10 | no |
| max_issues_count | int64 | 1 – 67k | yes |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 | yes |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 | yes |
| max_forks_repo_path | string | lengths 3–239 | no |
| max_forks_repo_name | string | lengths 5–130 | no |
| max_forks_repo_head_hexsha | string | lengths 40–78 | no |
| max_forks_repo_licenses | sequence | lengths 1–10 | no |
| max_forks_count | int64 | 1 – 105k | yes |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 | yes |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 | yes |
| content | string | lengths 1 – 1.03M | no |
| avg_line_length | float64 | 1 – 958k | no |
| max_line_length | int64 | 1 – 1.03M | no |
| alphanum_fraction | float64 | 0 – 1 | no |
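Each row below lists these metadata fields followed by the raw file text in the `content` column. For orientation only, here is a minimal sketch of how rows with this schema could be streamed with the Hugging Face `datasets` library; the dataset id `some-org/some-code-dataset` is a placeholder assumption, not the actual source of this dump.

```python
# Minimal sketch, assuming the dump is published as a Hugging Face dataset.
# "some-org/some-code-dataset" is a placeholder id, not the real one.
from datasets import load_dataset

rows = load_dataset("some-org/some-code-dataset", split="train", streaming=True)

for row in rows:
    # Identity of the file plus the repository it was taken from.
    print(row["hexsha"], row["max_stars_repo_name"], row["max_stars_repo_path"])
    # The raw source file lives in the "content" column.
    print(row["content"][:200])
    break
```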
hexsha: 4a26fe886e8adceb9b8d81173228bcaaf66fda7b | size: 2,685 | ext: py | lang: Python
repo path: contrib/python-obsolete/setup.py | repo: eido5/cubrid | head hexsha: f32dbe7cb90f096035c255d7b5f348438bbb5830 | licenses: ["Apache-2.0", "BSD-3-Clause"]
max_stars_count: 253 (2016-03-12T01:03:42.000Z to 2022-03-14T08:24:39.000Z) | max_issues_count: 1,124 (2016-03-31T03:48:58.000Z to 2022-03-31T23:44:04.000Z) | max_forks_count: 268 (2016-03-02T06:48:44.000Z to 2022-03-04T05:17:24.000Z)
content:
#
# Copyright (C) 2008 Search Solution Corporation.
# Copyright (c) 2016 CUBRID Corporation.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
#
#CUBRID-Python Distutils Setup
from distutils.core import setup, Extension
import os
import platform
import sys
if sys.argv[1] == 'install':
cci_lib_dir = ''
cci_inc_dir = ''
elif os.environ.has_key("CUBRID"):
if '64bit' in platform.architecture():
cci_lib_dir = os.environ["CUBRID"] + "/lib64"
else:
cci_lib_dir = os.environ["CUBRID"] + "/lib"
cci_inc_dir = os.environ["CUBRID"] + "/include"
else:
print "WARNING:",
print "it seems that you did not install CUBRID."
print "You must install CUBRID."
sys.exit(1)
setup(
name = "CUBRID-Python",
version = "0.5",
description="CUBRID API Module for Python",
author = "Kang, Dong-Wan",
author_email="[email protected]",
license="BSD",
url="http://dev.naver.com/projects/cubrid-python",
py_modules=["cubriddb"],
ext_modules=[
Extension(
"cubrid",
["cubrid.c"],
include_dirs = [cci_inc_dir],
library_dirs = [cci_lib_dir],
libraries = ["cascci"],
)
]
)
avg_line_length: 35.8 | max_line_length: 89 | alphanum_fraction: 0.700931
hexsha: 4a26feb6bafc5ade9e8e2bb193282e9f5d50bb59 | size: 4,107 | ext: py | lang: Python
repo path: src/human_lambdas/user_handler/tests/test_forgotten_password.py | repo: Human-Lambdas/human-lambdas | head hexsha: 9a2f2317f0c8dbfbfa88f3ba4994de7e6b2c4d50 | licenses: ["Apache-2.0"]
max_stars_count: 25 (2021-06-08T08:00:08.000Z to 2022-03-17T22:49:10.000Z) | max_issues_count: null | max_forks_count: 5 (2021-06-15T09:57:46.000Z to 2022-02-03T16:18:33.000Z)
content:
import logging
from django.test.utils import override_settings
from django.utils import timezone
from rest_framework import status
from rest_framework.test import APITestCase
from human_lambdas.user_handler.models import ForgottenPassword, User
logger = logging.getLogger(__name__)
class TestInvite(APITestCase):
def setUp(self):
self.valid_token = "thisisavalidtoken"
user = User(name="test", email="[email protected]")
user.save()
forgotten_password = ForgottenPassword(
email="[email protected]",
token=self.valid_token,
expires_at=timezone.now() + timezone.timedelta(15),
)
forgotten_password.save()
@override_settings(DEBUG=True)
def test_forgotten_password(self):
data = {"email": "[email protected]"}
response = self.client.post("/v1/users/forgotten-password", data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
@override_settings(DEBUG=True)
def test_forgotten_password_bad_email(self):
data = {"email": "aaa.com"}
response = self.client.post("/v1/users/forgotten-password", data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_forgotten_password_bad_token(self):
response = self.client.get(
"/v1/users/forgotten-password-token/feo80w3fn83t4f2n0fnwf3wb793282fsu"
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_forgotten_password_good_token(self):
response = self.client.get(
"/v1/users/forgotten-password-token/{0}".format(self.valid_token)
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# post endpoint after this line
def test_forgotten_password_post_wrong_token(self):
response = self.client.post(
"/v1/users/forgotten-password-token/{0}".format("thisisnotavalidtoken"),
{"password": "longlong"},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_forgotten_password_post_no_password(self):
response = self.client.post(
"/v1/users/forgotten-password-token/{0}".format(self.valid_token),
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_forgotten_password_post_short_password(self):
response = self.client.post(
"/v1/users/forgotten-password-token/{0}".format(self.valid_token),
{"password": "short"},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_forgotten_password_post_expired_token(self):
token = "thisisaanothersampletoken"
forgotten_password = ForgottenPassword(
email="[email protected]",
token=token,
expires_at=timezone.now() - timezone.timedelta(15),
)
forgotten_password.save()
response = self.client.post(
"/v1/users/forgotten-password-token/{0}".format(token),
{"password": "feefeefee"},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_forgotten_password_post(self):
token, email, password, new_password = (
"thisisthecorrectsampletoken",
"[email protected]",
"foofoofoo",
"feefeefee",
)
user = User(name="sample", email=email, password=password)
user.save()
forgotten_password = ForgottenPassword(
email=email,
token=token,
expires_at=timezone.now() + timezone.timedelta(15),
)
forgotten_password.save()
response = self.client.post(
"/v1/users/forgotten-password-token/{0}".format(token),
{"password": new_password},
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self.client.post(
"/v1/users/token", {"email": email, "password": new_password}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
avg_line_length: 38.027778 | max_line_length: 84 | alphanum_fraction: 0.658632
hexsha: 4a2700c68070bcc72e1d12f7ad22d715b92e7263 | size: 2,551 | ext: py | lang: Python
repo path: django_playlist/django_playlist/settings/local.py | repo: kburts/django-playlist | head hexsha: d9b2fdc144fb1c227b683ae253bdcabd8a7e40c2 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
"""Development settings and globals."""
from __future__ import absolute_import
from os.path import join, normpath
from .base import *
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
#See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': normpath(join(DJANGO_ROOT, 'default.db')),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
'''
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'polls',
'USER': 'django',
'PASSWORD': '12345',
'HOST': 'localhost',
'PORT': '', # Set to empty string for default.
}
}
'''
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
########## END CACHE CONFIGURATION
########## TOOLBAR CONFIGURATION
# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
'''
INSTALLED_APPS += (
'debug_toolbar',
)
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
]
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
'''
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
INTERNAL_IPS = ('127.0.0.1',)
########## END TOOLBAR CONFIGURATION
avg_line_length: 27.138298 | max_line_length: 93 | alphanum_fraction: 0.669541
hexsha: 4a2700e70f06db76afd257038ebdf0f36ad6fdfe | size: 5,341 | ext: py | lang: Python
repo path: src/models/layers/aspp.py | repo: Klimorg/template_segmentation | head hexsha: f5a5066905acb06c66793d9a361eae8570652af2 | licenses: ["Apache-2.0"]
max_stars_count: 1 (2022-02-01T06:54:22.000Z to 2022-02-01T06:54:22.000Z) | max_issues_count: 34 (2021-11-01T14:19:35.000Z to 2022-03-28T14:25:35.000Z) | max_forks_count: null
content:
from typing import Any, Dict
import tensorflow as tf
from tensorflow.keras.layers import (
AveragePooling2D,
BatchNormalization,
Concatenate,
Conv2D,
Layer,
ReLU,
UpSampling2D,
)
from tensorflow.keras.models import Sequential
@tf.keras.utils.register_keras_serializable()
class ASPP(Layer):
"""
Description of ASPP.
Attributes:
conv1 (Layer): `Conv2D-BatchNormalization-ReLU` layer.
conv2 (Layer): `Conv2D-BatchNormalization-ReLU` layer.
conv3 (Layer): `Conv2D-BatchNormalization-ReLU` layer.
conv4 (Layer): `Conv2D-BatchNormalization-ReLU` layer.
conv5 (Layer): `Conv2D-BatchNormalization-ReLU` layer.
conv6 (Layer): `Conv2D-BatchNormalization-ReLU` layer.
Inheritance:
tf.keras.layers.Layer:
"""
def __init__(
self,
filters: int,
l2_regul: float = 1e-4,
*args,
**kwargs,
) -> None:
"""Initialization of the class.
Args:
filters (int, optional): Number of filters in each `Conv2D` layers.
l2_regul (float, optional): Value of the constraint used for the
$L_2$ regularization. Defaults to 1e-4.
"""
super().__init__(*args, **kwargs)
self.filters = filters
self.l2_regul = l2_regul
self.dilation_rate = [6, 12, 18]
self.concat = Concatenate(axis=-1)
self.conv1 = Sequential(
[
Conv2D(
filters,
kernel_size=(1, 1),
padding="same",
use_bias=False,
kernel_initializer="he_uniform",
kernel_regularizer=tf.keras.regularizers.l2(l2=l2_regul),
),
BatchNormalization(),
ReLU(),
],
)
self.conv2 = Sequential(
[
Conv2D(
filters,
kernel_size=(3, 3),
padding="same",
use_bias=False,
kernel_initializer="he_uniform",
dilation_rate=self.dilation_rate[0],
kernel_regularizer=tf.keras.regularizers.l2(l2=l2_regul),
),
BatchNormalization(),
ReLU(),
],
)
self.conv3 = Sequential(
[
Conv2D(
filters,
kernel_size=(3, 3),
padding="same",
use_bias=False,
kernel_initializer="he_uniform",
dilation_rate=self.dilation_rate[1],
kernel_regularizer=tf.keras.regularizers.l2(l2=l2_regul),
),
BatchNormalization(),
ReLU(),
],
)
self.conv4 = Sequential(
[
Conv2D(
filters,
kernel_size=(3, 3),
padding="same",
use_bias=False,
kernel_initializer="he_uniform",
dilation_rate=self.dilation_rate[2],
kernel_regularizer=tf.keras.regularizers.l2(l2=l2_regul),
),
BatchNormalization(),
ReLU(),
],
)
self.conv5 = Sequential(
[
Conv2D(
filters,
kernel_size=(1, 1),
padding="same",
use_bias=False,
kernel_initializer="he_uniform",
dilation_rate=1,
kernel_regularizer=tf.keras.regularizers.l2(l2=l2_regul),
),
BatchNormalization(),
ReLU(),
],
)
self.conv6 = Sequential(
[
Conv2D(
filters,
kernel_size=(1, 1),
padding="same",
use_bias=False,
kernel_initializer="he_uniform",
dilation_rate=1,
kernel_regularizer=tf.keras.regularizers.l2(l2=l2_regul),
),
BatchNormalization(),
ReLU(),
],
)
def build(self, input_shape) -> None:
_, height, width, *_ = input_shape
self.pooling = AveragePooling2D(pool_size=(height, width))
self.upsample = UpSampling2D(size=(height, width), interpolation="bilinear")
def call(self, inputs, training=None) -> tf.Tensor:
fmap1 = self.conv1(inputs)
fmap2 = self.conv2(inputs)
fmap3 = self.conv3(inputs)
fmap4 = self.conv4(inputs)
fmap_pool = self.pooling(inputs)
fmap_pool = self.conv5(fmap_pool)
fmap_pool = self.upsample(fmap_pool)
fmap = self.concat([fmap_pool, fmap1, fmap2, fmap3, fmap4])
return self.conv6(fmap)
def get_config(self) -> Dict[str, Any]:
config = super().get_config()
config.update(
{
"filters": self.filters,
"l2_regularization": self.l2_regul,
"dilation_rate": self.dilation_rate,
},
)
return config
avg_line_length: 29.346154 | max_line_length: 84 | alphanum_fraction: 0.482307
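A brief usage sketch for the `ASPP` layer defined in the file above (not part of the original file; it assumes TensorFlow 2.x and an import path mirroring the repo layout shown in the row metadata):

```python
import tensorflow as tf
# Hypothetical import path, mirroring the repo layout above.
from src.models.layers.aspp import ASPP

aspp = ASPP(filters=128)                       # four conv branches plus image pooling
features = tf.random.normal((2, 32, 32, 256))  # NHWC backbone feature map
out = aspp(features)                           # build() derives pooling size from H, W
print(out.shape)                               # (2, 32, 32, 128)
```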
hexsha: 4a27025f09cdf789d2b089c5093c138bd50fa094 | size: 2,201 | ext: py | lang: Python
repo path: aliyun-python-sdk-mse/aliyunsdkmse/request/v20190531/AddAuthResourceRequest.py | repo: yndu13/aliyun-openapi-python-sdk | head hexsha: 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
class AddAuthResourceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'mse', '2019-05-31', 'AddAuthResource','mse')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_GatewayUniqueId(self): # String
return self.get_query_params().get('GatewayUniqueId')
def set_GatewayUniqueId(self, GatewayUniqueId): # String
self.add_query_param('GatewayUniqueId', GatewayUniqueId)
def get_AuthId(self): # Long
return self.get_query_params().get('AuthId')
def set_AuthId(self, AuthId): # Long
self.add_query_param('AuthId', AuthId)
def get_DomainId(self): # Long
return self.get_query_params().get('DomainId')
def set_DomainId(self, DomainId): # Long
self.add_query_param('DomainId', DomainId)
def get_Path(self): # String
return self.get_query_params().get('Path')
def set_Path(self, Path): # String
self.add_query_param('Path', Path)
def get_AcceptLanguage(self): # String
return self.get_query_params().get('AcceptLanguage')
def set_AcceptLanguage(self, AcceptLanguage): # String
self.add_query_param('AcceptLanguage', AcceptLanguage)
avg_line_length: 37.305085 | max_line_length: 74 | alphanum_fraction: 0.754203
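A hedged sketch of how a request class like the one above is typically dispatched with the Alibaba Cloud Python SDK core client; the credentials, region, gateway id and resource ids below are placeholders:

```python
from aliyunsdkcore.client import AcsClient
from aliyunsdkmse.request.v20190531.AddAuthResourceRequest import AddAuthResourceRequest

# Placeholder credentials and region; real values come from your account configuration.
client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")

request = AddAuthResourceRequest()
request.set_GatewayUniqueId("gw-xxxx")  # placeholder gateway id
request.set_AuthId(123)                 # placeholder auth id
request.set_DomainId(456)               # placeholder domain id
request.set_Path("/demo")

response = client.do_action_with_exception(request)  # raw bytes of the JSON body
print(response)
```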
hexsha: 4a270394aa718637de827bb494e3d1458dc42f56 | size: 9,769 | ext: py | lang: Python
repo path: models.py | repo: Fraunhofer-SIT/ModExTransformer | head hexsha: 709e321ba68d7b4f1e55e50230adec5389574701 | licenses: ["Apache-2.0"]
max_stars_count: 1 (2022-03-21T13:16:42.000Z to 2022-03-21T13:16:42.000Z) | max_issues_count: null | max_forks_count: null
content:
# Apache 2.0 License
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
""" This file was copied from: https://github.com/facebookresearch/deit/blob/main/models.py
and modified by Fraunhofer SIT in order to use the DeiT as attack model in a model extraction attack.
Modified passages are marked as follows:
#### Begin modifications
Code added or modified
#### End modifications
Apache 2.0 License
Copyright (c) 2022, Fraunhofer e.V.
All rights reserved.
"""
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import torch
import torch.nn as nn
from functools import partial
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_
__all__ = [
'deit_tiny_patch16_224', 'deit_small_patch16_224', 'deit_base_patch16_224',
'deit_tiny_distilled_patch16_224', 'deit_small_distilled_patch16_224',
'deit_base_distilled_patch16_224', 'deit_base_patch16_384',
'deit_base_distilled_patch16_384',
]
class DistilledVisionTransformer(VisionTransformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
num_patches = self.patch_embed.num_patches
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 2, self.embed_dim))
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity()
trunc_normal_(self.dist_token, std=.02)
trunc_normal_(self.pos_embed, std=.02)
self.head_dist.apply(self._init_weights)
def forward_features(self, x):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# with slight modifications to add the dist_token
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
dist_token = self.dist_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, dist_token, x), dim=1)
x = x + self.pos_embed
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0], x[:, 1]
def forward(self, x):
x, x_dist = self.forward_features(x)
x = self.head(x)
x_dist = self.head_dist(x_dist)
if self.training:
return x, x_dist
else:
# during inference, return the average of both classifier predictions
return (x + x_dist) / 2
@register_model
def deit_tiny_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
map_location="cpu", check_hash=True
)
if kwargs['num_classes'] != len(checkpoint['model']['head.bias']):
checkpoint["model"].pop('head.weight')
checkpoint["model"].pop('head.bias')
model.load_state_dict(checkpoint["model"], strict=False)
return model
@register_model
def deit_small_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
map_location="cpu", check_hash=True
)
if kwargs['num_classes'] != len(checkpoint['model']['head.bias']):
checkpoint["model"].pop('head.weight')
checkpoint["model"].pop('head.bias')
model.load_state_dict(checkpoint["model"], strict=False)
return model
@register_model
def deit_base_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
map_location="cpu", check_hash=True
)
if kwargs['num_classes'] != len(checkpoint['model']['head.bias']):
checkpoint["model"].pop('head.weight')
checkpoint["model"].pop('head.bias')
model.load_state_dict(checkpoint["model"], strict=False)
return model
@register_model
def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth",
map_location="cpu", check_hash=True
)
if kwargs['num_classes'] != len(checkpoint['model']['head.bias']):
checkpoint["model"].pop('head.weight')
checkpoint["model"].pop('head.bias')
checkpoint["model"].pop('head_dist.weight')
checkpoint["model"].pop('head_dist.bias')
model.load_state_dict(checkpoint["model"], strict=False)
return model
@register_model
def deit_small_distilled_patch16_224(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth",
map_location="cpu", check_hash=True
)
if kwargs['num_classes'] != len(checkpoint['model']['head.bias']):
checkpoint["model"].pop('head.weight')
checkpoint["model"].pop('head.bias')
checkpoint["model"].pop('head_dist.weight')
checkpoint["model"].pop('head_dist.bias')
model.load_state_dict(checkpoint["model"], strict=False)
return model
@register_model
def deit_base_distilled_patch16_224(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth",
map_location="cpu", check_hash=True
)
if kwargs['num_classes'] != len(checkpoint['model']['head.bias']):
checkpoint["model"].pop('head.weight')
checkpoint["model"].pop('head.bias')
checkpoint["model"].pop('head_dist.weight')
checkpoint["model"].pop('head_dist.bias')
model.load_state_dict(checkpoint["model"], strict=False)
return model
@register_model
def deit_base_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth",
map_location="cpu", check_hash=True
)
if kwargs['num_classes'] != len(checkpoint['model']['head.bias']):
checkpoint["model"].pop('head.weight')
checkpoint["model"].pop('head.bias')
model.load_state_dict(checkpoint["model"], strict=False)
return model
@register_model
def deit_base_distilled_patch16_384(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth",
map_location="cpu", check_hash=True
)
if kwargs['num_classes'] != len(checkpoint['model']['head.bias']):
checkpoint["model"].pop('head.weight')
checkpoint["model"].pop('head.bias')
checkpoint["model"].pop('head_dist.weight')
checkpoint["model"].pop('head_dist.bias')
model.load_state_dict(checkpoint["model"], strict=False)
return model
#### Begin modifications
@register_model
def googlenet(pretrained=True, num_classes=1000, **kwargs):
from torchvision.models import googlenet
model = googlenet(pretrained=pretrained)
if num_classes != 1000:
model.fc = nn.Linear(in_features=1024, out_features=num_classes, bias=True)
return model
#### End modifications
avg_line_length: 39.232932 | max_line_length: 116 | alphanum_fraction: 0.667929
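A short usage sketch for the model factories registered in the file above. It assumes a timm version compatible with this code (the original DeiT code targets timm 0.3.x) and that the `models` module has been imported so its `@register_model` entries are visible to `timm.create_model`:

```python
import timm
import torch

import models  # noqa: F401  (importing registers the deit_* and googlenet factories)

# Build a surrogate/attack model with a 10-class head; pretrained=False avoids a download here.
model = timm.create_model("deit_tiny_distilled_patch16_224", pretrained=False, num_classes=10)
model.eval()

x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)   # the distilled variant averages the cls/dist heads at inference
print(logits.shape)     # torch.Size([1, 10])
```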
hexsha: 4a2703dedf744199d59cfafa477e643e8eed1954 | size: 2,929 | ext: py | lang: Python
repo path: corus/sources/mokoron.py | repo: Ilseyar/corus | head hexsha: 61a4776f5e534469bb9df1e451b6a6d5fc0e991b | licenses: ["MIT"]
max_stars_count: 205 (2019-05-01T07:38:01.000Z to 2022-03-30T04:02:54.000Z) | max_issues_count: 78 (2019-04-29T06:53:53.000Z to 2021-09-20T14:51:25.000Z) | max_forks_count: 18 (2019-06-19T09:56:10.000Z to 2022-02-17T03:31:06.000Z)
content:
import re
from datetime import datetime
from corus.record import Record
from corus.io import load_lines
# – id: unique id of the message (tweet) in Twitter;
# – tdate: publication date of the message (tweet);
# – tmane: name of the user who published the message;
# – ttext: text of the message (tweet);
# – ttype: field that will later hold the class of the tweet (positive, negative, neutral);
# – trep: number of replies to the message. The Twitter API currently does not return this information;
# – tfav: how many times the message was added to favourites by other users;
# – tstcount: total number of the user's messages on Twitter;
# – tfol: number of the user's followers (the people who read the user);
# – tfrien: number of the user's friends (the people whom the user reads);
# – listcount: number of subscription lists the Twitter user has been added to.
class MokoronRecord(Record):
__attributes__ = [
'id', 'timestamp', 'user', 'text', 'sentiment',
'replies', 'retweets', 'favourites', 'posts',
'followers', 'friends', 'lists'
]
def __init__(self, id, timestamp, user, text, sentiment,
replies, retweets, favourites, posts, followers, friends, lists):
self.id = id
self.timestamp = timestamp
self.user = user
self.text = text
self.sentiment = sentiment
self.replies = replies
self.retweets = retweets
self.favourites = favourites
self.posts = posts
self.followers = followers
self.friends = friends
self.lists = lists
@classmethod
def from_match(cls, match):
dict = match.groupdict()
for key in ['id', 'sentiment', 'replies', 'retweets',
'favourites', 'posts', 'followers', 'friends', 'lists']:
dict[key] = int(dict[key])
dict['timestamp'] = datetime.utcfromtimestamp(float(dict['timestamp']))
return cls(**dict)
# INSERT INTO `sentiment` VALUES (408906695721877504,'1386325928','Va5ilina','Пропавшая в Хабаровске школьница почти сутки провела в яме у коллектор',2,0,0,0,183,95,158,0),(408906695700520960,'1386325928','i_wont_judge_ya','ЛЕНТА, Я СЕГОДНЯ ПОЛГОДА ДИРЕКШИОНЕЕЕЕР! С:\nХОТЯ ВСЕ РАВНО НИКТО НЕ ПОЗДРАВИТ ЛОЛ',2,0,0,0,19809,804,257,11),
INSERT = 'INSERT INTO `sentiment` VALUES'
RECORD = re.compile(r'''
\(
(?P<id>\d+),
'(?P<timestamp>\d+)',
'(?P<user>.+?)',
'(?P<text>.+?)',
(?P<sentiment>\d+),
(?P<replies>\d+),
(?P<retweets>\d+),
(?P<favourites>\d+),
(?P<posts>\d+),
(?P<followers>\d+),
(?P<friends>\d+),
(?P<lists>\d+)
\)
''', re.X)
def load_mokoron(path):
for line in load_lines(path):
if line.startswith(INSERT):
for match in RECORD.finditer(line):
yield MokoronRecord.from_match(match)
__all__ = [
'load_mokoron'
]
avg_line_length: 34.05814 | max_line_length: 334 | alphanum_fraction: 0.656879
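A minimal usage sketch for the loader defined in the file above; `db.sql` is a placeholder path to the mokoron Twitter sentiment SQL dump:

```python
from corus.sources.mokoron import load_mokoron

# Placeholder path to the mokoron SQL dump file.
records = load_mokoron("db.sql")

for record in records:
    # Each record is a MokoronRecord with the tweet text, sentiment label and author stats.
    print(record.id, record.timestamp, record.sentiment)
    print(record.text)
    break
```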
hexsha: 4a270429055799bb11c5f1a4697f3b53b4e71743 | size: 13,409 | ext: py | lang: Python
repo path: stage/test_dataformats.py | repo: anubandhan/datacollector-tests | head hexsha: 301c024c66d68353735256b262b681dd05ba16cc | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: 1 (2019-04-24T11:06:38.000Z to 2019-04-24T11:06:38.000Z) | max_forks_count: 2 (2019-05-24T06:34:37.000Z to 2020-03-30T11:48:18.000Z)
content:
# Copyright 2018 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pytest
from streamsets.sdk.utils import Version
from decimal import Decimal
from streamsets.testframework.markers import sdc_min_version
from stage.utils.utils_xml import get_xml_output_field
logger = logging.getLogger(__name__)
#
# Text base file format parsing via Data Parser processor
#
def create_text_pipeline(sdc_builder, data_format, content, **parser_configs):
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('Dev Raw Data Source')
origin.data_format = 'TEXT'
origin.raw_data = content
parser = builder.add_stage('Data Parser')
parser.field_to_parse = '/text'
parser.target_field = '/'
parser.data_format = data_format
if parser_configs:
parser.set_attributes(**parser_configs)
trash = builder.add_stage('Trash')
origin >> parser >> trash
return builder.build('Parse {}'.format(data_format))
@sdc_min_version('3.0.0.0')
def test_parse_json(sdc_builder, sdc_executor):
"""Validate parsing of JSON content via the Data Parser processor."""
pipeline = create_text_pipeline(sdc_builder, 'JSON', '{"key" : "value"}')
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot['DataParser_01'].output) == 1
assert snapshot['DataParser_01'].output[0].get_field_data('/key') == 'value'
@sdc_min_version('3.0.0.0')
def test_parse_delimited(sdc_builder, sdc_executor):
"""Validate parsing of delimited content via the Data Parser processor."""
pipeline = create_text_pipeline(sdc_builder, 'DELIMITED', '1,2,3')
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot['DataParser_01'].output) == 1
assert snapshot['DataParser_01'].output[0].get_field_data('[0]') == '1'
assert snapshot['DataParser_01'].output[0].get_field_data('[1]') == '2'
assert snapshot['DataParser_01'].output[0].get_field_data('[2]') == '3'
@sdc_min_version('3.8.0')
def test_parse_multichar_delimited(sdc_builder, sdc_executor):
"""Validate parsing of delimited content via the Data Parser processor."""
pipeline = create_text_pipeline(sdc_builder, 'DELIMITED', 'abcd||efgh||ijkl',
delimiter_format_type='MULTI_CHARACTER',
multi_character_field_delimiter='||', header_line='NO_HEADER')
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
outputs = snapshot['DataParser_01'].output
assert len(outputs) == 1
output_record = outputs[0]
assert output_record.get_field_data('[0]') == 'abcd'
assert output_record.get_field_data('[1]') == 'efgh'
assert output_record.get_field_data('[2]') == 'ijkl'
@sdc_min_version('3.0.0.0')
def test_parse_log(sdc_builder, sdc_executor):
"""Validate parsing of log content via the Data Parser processor."""
pipeline = create_text_pipeline(sdc_builder, 'LOG', '127.0.0.1 ss h [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326')
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot['DataParser_01'].output) == 1
assert snapshot['DataParser_01'].output[0].get_field_data('/request') == '/apache_pb.gif'
assert snapshot['DataParser_01'].output[0].get_field_data('/clientip') == '127.0.0.1'
def test_parse_syslog(sdc_builder, sdc_executor):
"""Validate parsing of syslog content via the Data Parser processor."""
pipeline = create_text_pipeline(sdc_builder, 'SYSLOG', "<34>Oct 11 22:14:15 mymachine su: 'su root' failed for lonvick on /dev/pts/")
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot['DataParser_01'].output) == 1
assert snapshot['DataParser_01'].output[0].get_field_data('/severity') == 2
assert snapshot['DataParser_01'].output[0].get_field_data('/host') == 'mymachine'
@sdc_min_version('3.0.0.0')
def test_parse_xml(sdc_builder, sdc_executor):
"""Validate parsing of xml content via the Data Parser processor."""
pipeline = create_text_pipeline(sdc_builder, 'XML', "<root><key>value</key></root>")
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot['DataParser_01'].output) == 1
key_field = get_xml_output_field(pipeline[0], snapshot['DataParser_01'].output[0].field, 'root')
assert key_field['key'][0]['value'] == 'value'
@sdc_min_version('3.14.0')
def test_parse_xml_preserve_root_element(sdc_builder, sdc_executor):
"""Validate parsing of xml content via the Data Parser processor.
Since 3.14.0 there is a new property 'preserve root element', set to True by default"""
pipeline = create_text_pipeline(sdc_builder, 'XML', "<root><key>value</key></root>", preserve_root_element=True)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot['DataParser_01'].output) == 1
assert snapshot['DataParser_01'].output[0].get_field_data('/root/key[0]/value') == 'value'
# SDC-11018: Re-scale data when writing Decimal into Avro
@sdc_min_version('3.2.0.0') # Data Generator
def test_avro_decimal_incorrect_scale(sdc_builder, sdc_executor):
"""Make sure that we auto-rescale decimal as needed when writing to Avro.
raw data source >> type converter >> generator >> parser >> trash
"""
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'JSON'
source.raw_data = """{"a": "1.10"}
{"a": null}"""
type_converter = builder.add_stage('Field Type Converter')
type_converter.conversion_method = 'BY_FIELD'
type_converter.field_type_converter_configs = [{
"fields" : [ "/a" ],
"targetType" : "DECIMAL",
"scale" : 2,
"decimalScaleRoundingStrategy" : "ROUND_HALF_EVEN"
}]
generator = builder.add_stage('Data Generator')
generator.data_format = 'AVRO'
generator.avro_schema_location = 'INLINE'
generator.avro_schema = """{
"type" : "record",
"name" : "TestDecimal",
"fields" : [ {
"name" : "a",
"type" : [ "null", {
"type" : "bytes",
"logicalType" : "decimal",
"precision" : 7,
"scale" : 5
} ],
"default" : null
}]
}"""
parser = builder.add_stage('Data Parser')
parser.field_to_parse = '/'
parser.target_field = '/'
parser.data_format = 'AVRO'
parser.avro_schema_location = 'SOURCE'
trash = builder.add_stage('Trash')
source >> type_converter >> generator >> parser >> trash
pipeline = builder.build()
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
assert len(snapshot[parser].output) == 2
assert snapshot[parser].output[0].get_field_data('/a') == Decimal('1.10000')
assert snapshot[parser].output[1].get_field_data('/a') == None
# SDC-11022: Do not use avro union index when writing avro data
@sdc_min_version('3.2.0.0') # Data Generator
def test_avro_decimal_union_index_on_write(sdc_builder, sdc_executor):
"""Make sure that avro union index is not used when writing data out to Avro file format.
raw data source >> expression >> generator >> parser >> trash
"""
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'JSON'
source.raw_data = '{"a": "b"}'
# Use clearly non-existing typeIndex
expression = builder.add_stage('Expression Evaluator')
expression.header_attribute_expressions = [
{'attributeToSet': 'avro.union.typeIndex./a', 'headerAttributeExpression': '666'}
]
generator = builder.add_stage('Data Generator')
generator.data_format = 'AVRO'
generator.avro_schema_location = 'INLINE'
generator.avro_schema = """{
"type" : "record",
"name" : "TestDecimal",
"fields" : [ {
"name" : "a",
"type" : [ "null", "int", "string"],
"default" : null
}]
}"""
parser = builder.add_stage('Data Parser')
parser.field_to_parse = '/'
parser.target_field = '/'
parser.data_format = 'AVRO'
parser.avro_schema_location = 'SOURCE'
trash = builder.add_stage('Trash')
source >> expression >> generator >> parser >> trash
pipeline = builder.build()
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
assert len(snapshot[parser].output) == 1
assert snapshot[parser].output[0].get_field_data('/a') == 'b'
# SDC-11557: Publish field attributes for typed nulls when reading Avro
@sdc_min_version('3.9.0')
def test_avro_decimal_field_attributes_for_typed_null(sdc_builder, sdc_executor):
"""Make sure that we persist decimal's field attributes for typed nul """
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'JSON'
source.raw_data = '{"decimal": "12.01"}{"decimal":null}'
converter = builder.add_stage('Field Type Converter')
converter.conversion_method = 'BY_FIELD'
converter.field_type_converter_configs = [{
'fields': ['/decimal'],
'targetType': 'DECIMAL'
}]
generator = builder.add_stage('Data Generator')
generator.data_format = 'AVRO'
generator.avro_schema_location = 'INLINE'
generator.avro_schema = """{
"type" : "record",
"name" : "TestDecimal",
"fields" : [ {
"name" : "decimal",
"type" : ["null", {"name": "name", "type": "bytes", "logicalType": "decimal", "precision":4, "scale":2}],
"default" : null
}]
}"""
parser = builder.add_stage('Data Parser')
parser.field_to_parse = '/'
parser.target_field = '/'
parser.data_format = 'AVRO'
parser.avro_schema_location = 'SOURCE'
trash = builder.add_stage('Trash')
source >> converter >> generator >> parser >> trash
pipeline = builder.build()
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
assert len(snapshot[parser].output) == 2
assert snapshot[parser].output[0].get_field_data('/decimal') == Decimal("12.01")
assert snapshot[parser].output[1].get_field_data('/decimal') == None
assert snapshot[parser].output[0].get_field_data('/decimal').attributes['precision'] == '4'
assert snapshot[parser].output[1].get_field_data('/decimal').attributes['precision'] == '4'
assert snapshot[parser].output[0].get_field_data('/decimal').attributes['scale'] == '2'
assert snapshot[parser].output[1].get_field_data('/decimal').attributes['scale'] == '2'
# SDC-11869: Add ability to specify quote mode when generating CSV data
@sdc_min_version('3.10.0')
@pytest.mark.parametrize('quote_mode,expected', [
('ALL', '"a"|"b"|" c"\r\n'),
('MINIMAL', 'a|b|" c"\r\n'),
('NONE', 'a|b| c\r\n')
])
def test_delimited_quote_mode(sdc_builder, sdc_executor, quote_mode, expected):
"""Ensure that delimited quote mode works properly."""
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'DELIMITED'
source.header_line = 'WITH_HEADER'
source.raw_data = 'a,b,c\na,b, c'
generator = builder.add_stage('Data Generator')
generator.data_format = 'DELIMITED'
generator.delimiter_format = 'CUSTOM'
generator.quote_mode = quote_mode
generator.output_type = 'STRING'
# Due to STF-833
generator.target_field = '/target'
trash = builder.add_stage('Trash')
source >> generator >> trash
pipeline = builder.build()
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
assert len(snapshot[generator].output) == 1
assert snapshot[generator].output[0].get_field_data('/target') == expected
avg_line_length: 37.455307 | max_line_length: 141 | alphanum_fraction: 0.69431
hexsha: 4a2704aa64be8a8c31100473b9806dc75ba7a26e | size: 112 | ext: py | lang: Python
repo path: mlask/__init__.py | repo: name-yh2022/pymlask | head hexsha: 00e1602502b122db87f69236da7ed9cae0514abe | licenses: ["BSD-3-Clause"]
max_stars_count: 89 (2017-02-10T13:02:15.000Z to 2022-02-12T06:49:06.000Z) | max_issues_count: 14 (2017-02-12T06:22:01.000Z to 2022-01-16T03:17:53.000Z) | max_forks_count: 19 (2017-02-12T05:44:24.000Z to 2022-02-17T03:31:06.000Z)
content:
# -*- coding: utf-8 -*-
from .mlask import MLAsk
VERSION = (0, 3, 2)
__version__ = '0.3.2'
__all__ = ['MLAsk']
avg_line_length: 16 | max_line_length: 24 | alphanum_fraction: 0.589286
hexsha: 4a2704b4df752ff7cd6e66bcd230cfed9d3640a7 | size: 975 | ext: py | lang: Python
repo path: Chapter06/2multinomial.py | repo: Jchen-sudo/ml-code-record | head hexsha: 4a6f8237c66b381832ebd534cad92c684dc5fe3b | licenses: ["MIT"]
max_stars_count: 50 (2018-08-13T13:11:04.000Z to 2022-02-17T23:00:20.000Z) | max_issues_count: null | max_forks_count: 29 (2018-06-08T10:56:40.000Z to 2022-02-19T06:26:23.000Z)
content:
from __future__ import print_function
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.naive_bayes import MultinomialNB
# For reproducibility
np.random.seed(1000)
if __name__ == '__main__':
# Prepare a dummy dataset
data = [
{'house': 100, 'street': 50, 'shop': 25, 'car': 100, 'tree': 20},
{'house': 5, 'street': 5, 'shop': 0, 'car': 10, 'tree': 500, 'river': 1}
]
# Create and train a dictionary vectorizer
dv = DictVectorizer(sparse=False)
X = dv.fit_transform(data)
Y = np.array([1, 0])
# Create and train a Multinomial Naive Bayes classifier
mnb = MultinomialNB()
mnb.fit(X, Y)
# Create dummy test data
test_data = data = [
{'house': 80, 'street': 20, 'shop': 15, 'car': 70, 'tree': 10, 'river': 1},
{'house': 10, 'street': 5, 'shop': 1, 'car': 8, 'tree': 300, 'river': 0}
]
Yp = mnb.predict(dv.fit_transform(test_data))
print(Yp)
avg_line_length: 26.351351 | max_line_length: 83 | alphanum_fraction: 0.611282
hexsha: 4a27061d373cd4de43c133e5ef7316b076acd0c8 | size: 2,399 | ext: py | lang: Python
repo path: fluiddb/api/util.py | repo: fluidinfo/fluiddb | head hexsha: b5a8c8349f3eaf3364cc4efba4736c3e33b30d96 | licenses: ["Apache-2.0"]
max_stars_count: 3 (2021-05-10T14:41:30.000Z to 2021-12-16T05:53:30.000Z) | max_issues_count: null | max_forks_count: 2 (2018-01-24T09:03:21.000Z to 2021-06-25T08:34:54.000Z)
content:
from fluiddb.data.permission import Operation
CATEGORY_AND_ACTION_BY_OPERATION = {
Operation.CREATE_NAMESPACE: (u'namespaces', u'create'),
Operation.UPDATE_NAMESPACE: (u'namespaces', u'update'),
Operation.DELETE_NAMESPACE: (u'namespaces', u'delete'),
Operation.LIST_NAMESPACE: (u'namespaces', u'list'),
Operation.CONTROL_NAMESPACE: (u'namespaces', u'control'),
Operation.UPDATE_TAG: (u'tags', u'update'),
Operation.DELETE_TAG: (u'tags', u'delete'),
Operation.CONTROL_TAG: (u'tags', u'control'),
Operation.WRITE_TAG_VALUE: (u'tag-values', u'write'),
Operation.READ_TAG_VALUE: (u'tag-values', u'read'),
Operation.DELETE_TAG_VALUE: (u'tag-values', u'delete'),
Operation.CONTROL_TAG_VALUE: (u'tag-values', u'control'),
Operation.CREATE_USER: (u'users', 'create'),
Operation.DELETE_USER: (u'users', 'delete'),
Operation.UPDATE_USER: (u'users', 'update'),
Operation.CREATE_OBJECT: (u'objects', 'create')}
OPERATION_BY_ACTION = {
(u'namespaces', u'create'): Operation.CREATE_NAMESPACE,
(u'namespaces', u'update'): Operation.UPDATE_NAMESPACE,
(u'namespaces', u'delete'): Operation.DELETE_NAMESPACE,
(u'namespaces', u'list'): Operation.LIST_NAMESPACE,
(u'namespaces', u'control'): Operation.CONTROL_NAMESPACE,
(u'tags', u'update'): Operation.UPDATE_TAG,
(u'tags', u'delete'): Operation.DELETE_TAG,
(u'tags', u'control'): Operation.CONTROL_TAG,
# 'create' is provided for backwards compatibility. The preferred action
# is 'write'.
(u'tag-values', u'create'): Operation.WRITE_TAG_VALUE,
(u'tag-values', u'write'): Operation.WRITE_TAG_VALUE,
(u'tag-values', u'read'): Operation.READ_TAG_VALUE,
(u'tag-values', u'delete'): Operation.DELETE_TAG_VALUE,
(u'tag-values', u'control'): Operation.CONTROL_TAG_VALUE,
}
def getOperation(category, action):
"""Get an L{Operation} value for the given C{category} and C{action}.
@param category: The category of the operation.
@param action: The action for the category.
@return: An L{Operation} value.
"""
return OPERATION_BY_ACTION[(category, action)]
def getCategoryAndAction(operation):
"""Returns a category and action for a given L{Operation} value.
@param operation: An L{Operation} value.
@return: A C{(category, action)} 2-tuple.
"""
return CATEGORY_AND_ACTION_BY_OPERATION[operation]
avg_line_length: 40.661017 | max_line_length: 77 | alphanum_fraction: 0.697791
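A small sketch of the mapping helpers defined in the file above (not part of the original module; it assumes the fluiddb package is importable):

```python
from fluiddb.api.util import getOperation, getCategoryAndAction
from fluiddb.data.permission import Operation

# 'tag-values' + 'create' is kept only for backwards compatibility and maps to WRITE_TAG_VALUE.
assert getOperation(u'tag-values', u'create') == Operation.WRITE_TAG_VALUE
assert getOperation(u'namespaces', u'list') == Operation.LIST_NAMESPACE

# The reverse lookup returns the canonical (category, action) pair.
assert getCategoryAndAction(Operation.WRITE_TAG_VALUE) == (u'tag-values', u'write')
```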
hexsha: 4a27069d1076138b39c65a74aa54242265cb2583 | size: 42,328 | ext: py | lang: Python
repo path: pytorch_lightning/trainer/trainer.py | repo: ybrovman/pytorch-lightning | head hexsha: c1c6e3b6c988c690f9e2cd5726da4ab3402dbd03 | licenses: ["Apache-2.0"]
max_stars_count: 1 (2020-05-07T15:15:40.000Z to 2020-05-07T15:15:40.000Z) | max_issues_count: null | max_forks_count: null
content:
import distutils
import inspect
import os
import sys
from argparse import ArgumentParser
from typing import Union, Optional, List, Dict, Tuple, Iterable, Any
import torch
import torch.distributed as torch_distrib
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, Callback
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.loggers import LightningLoggerBase
from pytorch_lightning.profiler import SimpleProfiler, PassThroughProfiler, BaseProfiler
from pytorch_lightning.trainer.auto_mix_precision import TrainerAMPMixin
from pytorch_lightning.trainer.callback_config import TrainerCallbackConfigMixin
from pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin
from pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin
from pytorch_lightning.trainer.deprecated_api import TrainerDeprecatedAPITillVer0_8, TrainerDeprecatedAPITillVer0_9
from pytorch_lightning.trainer.distrib_data_parallel import TrainerDDPMixin
from pytorch_lightning.trainer.distrib_parts import (
TrainerDPMixin, parse_gpu_ids, determine_root_gpu_device, pick_multiple_gpus)
from pytorch_lightning.trainer.evaluation_loop import TrainerEvaluationLoopMixin
from pytorch_lightning.trainer.logging import TrainerLoggingMixin
from pytorch_lightning.trainer.model_hooks import TrainerModelHooksMixin
from pytorch_lightning.trainer.optimizers import TrainerOptimizersMixin
from pytorch_lightning.trainer.supporters import TensorRunningAccum
from pytorch_lightning.trainer.training_io import TrainerIOMixin
from pytorch_lightning.trainer.training_loop import TrainerTrainLoopMixin
from pytorch_lightning.trainer.training_tricks import TrainerTrainingTricksMixin
from pytorch_lightning.trainer.lr_finder import TrainerLRFinderMixin
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities import rank_zero_warn
try:
from apex import amp
except ImportError:
APEX_AVAILABLE = False
else:
APEX_AVAILABLE = True
try:
import torch_xla
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp
except ImportError:
XLA_AVAILABLE = False
else:
XLA_AVAILABLE = True
class Trainer(
TrainerIOMixin,
TrainerOptimizersMixin,
TrainerAMPMixin,
TrainerDPMixin,
TrainerDDPMixin,
TrainerLoggingMixin,
TrainerModelHooksMixin,
TrainerTrainingTricksMixin,
TrainerDataLoadingMixin,
TrainerEvaluationLoopMixin,
TrainerTrainLoopMixin,
TrainerCallbackConfigMixin,
TrainerCallbackHookMixin,
TrainerLRFinderMixin,
TrainerDeprecatedAPITillVer0_8,
TrainerDeprecatedAPITillVer0_9,
):
DEPRECATED_IN_0_8 = (
'gradient_clip', 'nb_gpu_nodes', 'max_nb_epochs', 'min_nb_epochs',
'add_row_log_interval', 'nb_sanity_val_steps'
)
DEPRECATED_IN_0_9 = ('use_amp', 'show_progress_bar')
def __init__(
self,
logger: Union[LightningLoggerBase, Iterable[LightningLoggerBase], bool] = True,
checkpoint_callback: Union[ModelCheckpoint, bool] = True,
early_stop_callback: Optional[Union[EarlyStopping, bool]] = False,
callbacks: Optional[List[Callback]] = None,
default_root_dir: Optional[str] = None,
gradient_clip_val: float = 0,
process_position: int = 0,
num_nodes: int = 1,
num_processes: int = 1,
gpus: Optional[Union[List[int], str, int]] = None,
auto_select_gpus: bool = False,
num_tpu_cores: Optional[int] = None,
log_gpu_memory: Optional[str] = None,
progress_bar_refresh_rate: int = 1,
overfit_pct: float = 0.0,
track_grad_norm: int = -1,
check_val_every_n_epoch: int = 1,
fast_dev_run: bool = False,
accumulate_grad_batches: Union[int, Dict[int, int], List[list]] = 1,
max_epochs: int = 1000,
min_epochs: int = 1,
max_steps: Optional[int] = None,
min_steps: Optional[int] = None,
train_percent_check: float = 1.0,
val_percent_check: float = 1.0,
test_percent_check: float = 1.0,
val_check_interval: float = 1.0,
log_save_interval: int = 100,
row_log_interval: int = 10,
add_row_log_interval=None, # backward compatible, todo: remove in v0.8.0
distributed_backend: Optional[str] = None,
precision: int = 32,
print_nan_grads: bool = False, # backward compatible, todo: remove in v0.9.0
weights_summary: Optional[str] = 'full',
weights_save_path: Optional[str] = None,
amp_level: str = 'O1',
num_sanity_val_steps: int = 5,
truncated_bptt_steps: Optional[int] = None,
resume_from_checkpoint: Optional[str] = None,
profiler: Optional[BaseProfiler] = None,
benchmark: bool = False,
reload_dataloaders_every_epoch: bool = False,
auto_lr_find: Union[bool, str] = False,
replace_sampler_ddp: bool = True,
default_save_path=None, # backward compatible, todo: remove in v0.8.0
gradient_clip=None, # backward compatible, todo: remove in v0.8.0
nb_gpu_nodes=None, # backward compatible, todo: remove in v0.8.0
max_nb_epochs=None, # backward compatible, todo: remove in v0.8.0
min_nb_epochs=None, # backward compatible, todo: remove in v0.8.0
use_amp=None, # backward compatible, todo: remove in v0.9.0
show_progress_bar=None, # backward compatible, todo: remove in v0.9.0
nb_sanity_val_steps=None, # backward compatible, todo: remove in v0.8.0
terminate_on_nan: bool = False,
**kwargs
):
r"""
Customize every aspect of training via flags
Args:
logger: Logger (or iterable collection of loggers) for experiment tracking.
checkpoint_callback: Callback for checkpointing.
early_stop_callback (:class:`pytorch_lightning.callbacks.EarlyStopping`):
callbacks: Add a list of callbacks.
default_root_dir: Default path for logs and weights when no logger/ckpt_callback passed
default_save_path:
.. warning:: .. deprecated:: 0.7.3
Use `default_root_dir` instead. Will remove 0.9.0.
gradient_clip_val: 0 means don't clip.
gradient_clip:
.. warning:: .. deprecated:: 0.7.0
Use `gradient_clip_val` instead. Will remove 0.9.0.
process_position: orders the tqdm bar when running multiple models on same machine.
num_nodes: number of GPU nodes for distributed training.
nb_gpu_nodes:
.. warning:: .. deprecated:: 0.7.0
Use `num_nodes` instead. Will remove 0.9.0.
gpus: Which GPUs to train on.
auto_select_gpus:
If enabled and `gpus` is an integer, pick available
gpus automatically. This is especially useful when
GPUs are configured to be in "exclusive mode", such
that only one process at a time can access them.
num_tpu_cores: How many TPU cores to train on (1 or 8).
log_gpu_memory: None, 'min_max', 'all'. Might slow performance
show_progress_bar:
.. warning:: .. deprecated:: 0.7.2
Set `progress_bar_refresh_rate` to postive integer to enable. Will remove 0.9.0.
progress_bar_refresh_rate: How often to refresh progress bar (in steps). Value ``0`` disables progress bar.
overfit_pct: How much of training-, validation-, and test dataset to check.
track_grad_norm: -1 no tracking. Otherwise tracks that norm
check_val_every_n_epoch: Check val every n train epochs.
fast_dev_run: runs 1 batch of train, test and val to find any bugs (ie: a sort of unit test).
accumulate_grad_batches: Accumulates grads every k batches or as set up in the dict.
max_epochs: Stop training once this number of epochs is reached.
max_nb_epochs:
.. warning:: .. deprecated:: 0.7.0
Use `max_epochs` instead. Will remove 0.9.0.
min_epochs: Force training for at least these many epochs
min_nb_epochs:
.. warning:: .. deprecated:: 0.7.0
Use `min_epochs` instead. Will remove 0.9.0.
max_steps: Stop training after this number of steps. Disabled by default (None).
min_steps: Force training for at least these number of steps. Disabled by default (None).
train_percent_check: How much of training dataset to check.
val_percent_check: How much of validation dataset to check.
test_percent_check: How much of test dataset to check.
val_check_interval: How often within one training epoch to check the validation set
log_save_interval: Writes logs to disk this often
row_log_interval: How often to add logging rows (does not write to disk)
add_row_log_interval:
.. warning:: .. deprecated:: 0.7.0
Use `row_log_interval` instead. Will remove 0.9.0.
distributed_backend: The distributed backend to use.
use_amp:
.. warning:: .. deprecated:: 0.7.0
Use `precision` instead. Will remove 0.9.0.
precision: Full precision (32), half precision (16).
print_nan_grads:
.. warning:: .. deprecated:: 0.7.2
Has no effect. When detected, NaN grads will be printed automatically.
Will remove 0.9.0.
weights_summary: Prints a summary of the weights when training begins.
weights_save_path: Where to save weights if specified. Will override default_root_dir
for checkpoints only. Use this if for whatever reason you need the checkpoints
stored in a different place than the logs written in `default_root_dir`.
amp_level: The optimization level to use (O1, O2, etc...).
num_sanity_val_steps: Sanity check runs n batches of val before starting the training routine.
nb_sanity_val_steps:
.. warning:: .. deprecated:: 0.7.0
Use `num_sanity_val_steps` instead. Will remove 0.8.0.
truncated_bptt_steps: Truncated back propagation through time: performs backprop every k steps of a much longer sequence.
resume_from_checkpoint: To resume training from a specific checkpoint pass in the path here.
profiler: To profile individual steps during training and assist in identifying bottlenecks.
reload_dataloaders_every_epoch: Set to True to reload dataloaders every epoch
auto_lr_find: If set to True, will `initially` run a learning rate finder,
trying to optimize initial learning for faster convergence. Sets learning
rate in self.hparams.lr | self.hparams.learning_rate in the lightning module.
To use a different key, set a string instead of True with the key name.
replace_sampler_ddp: Explicitly enables or disables sampler replacement.
If not specified this will be toggled automatically when DDP is used.
benchmark: If true enables cudnn.benchmark.
terminate_on_nan: If set to True, will terminate training (by raising a `ValueError`) at the
end of each training batch, if any of the parameters or the loss are NaN or +/-inf.
"""
# Init callbacks
self.callbacks = callbacks or []
self.on_init_start()
# benchmarking
self.benchmark = benchmark
torch.backends.cudnn.benchmark = self.benchmark
# Transfer params
self.num_nodes = num_nodes
# Backward compatibility, TODO: remove in v0.8.0
if nb_gpu_nodes is not None:
rank_zero_warn("Argument `nb_gpu_nodes` has renamed to `num_nodes` since v0.5.0"
" and this method will be removed in v0.8.0", DeprecationWarning)
self.num_gpu_nodes = nb_gpu_nodes
self.log_gpu_memory = log_gpu_memory
self.gradient_clip_val = gradient_clip_val
# Backward compatibility, TODO: remove in v0.8.0
if gradient_clip is not None:
rank_zero_warn("Argument `gradient_clip` has renamed to `gradient_clip_val` since v0.5.0"
" and this method will be removed in v0.8.0", DeprecationWarning)
self.gradient_clip = gradient_clip
self.progress_bar_refresh_rate = progress_bar_refresh_rate
self.check_val_every_n_epoch = check_val_every_n_epoch
self.track_grad_norm = track_grad_norm
self.on_gpu = True if (gpus and torch.cuda.is_available()) else False
# tpu config
self.on_tpu = num_tpu_cores is not None
self.num_tpu_cores = num_tpu_cores
assert num_tpu_cores in [1, 8, None], 'num_tpu_cores can only be 1 or 8'
if num_processes != 1 and distributed_backend != "ddp_cpu":
rank_zero_warn("num_processes is only used for distributed_backend=\"ddp_cpu\". Ignoring it.")
self.num_processes = num_processes
self.process_position = process_position
self.weights_summary = weights_summary
self.max_epochs = max_epochs
# Backward compatibility, TODO: remove in v0.8.0
if max_nb_epochs is not None:
rank_zero_warn("Argument `max_nb_epochs` has renamed to `max_epochs` since v0.5.0"
" and this method will be removed in v0.8.0", DeprecationWarning)
self.max_nb_epochs = max_nb_epochs
self.min_epochs = min_epochs
# Backward compatibility, TODO: remove in v0.8.0
if min_nb_epochs is not None:
rank_zero_warn("Argument `min_nb_epochs` has renamed to `min_epochs` since v0.5.0"
" and this method will be removed in v0.8.0", DeprecationWarning)
self.min_nb_epochs = min_nb_epochs
self.max_steps = max_steps
self.min_steps = min_steps
self.num_sanity_val_steps = num_sanity_val_steps
# Backward compatibility, TODO: remove in v0.8.0
if nb_sanity_val_steps is not None:
rank_zero_warn("Argument `nb_sanity_val_steps` has renamed to "
"`num_sanity_val_steps` since v0.5.0"
" and this method will be removed in v0.8.0", DeprecationWarning)
self.nb_sanity_val_steps = nb_sanity_val_steps
# Backward compatibility, TODO: remove in v0.9.0
if print_nan_grads:
rank_zero_warn("Argument `print_nan_grads` has no effect and will be removed in v0.9.0."
" NaN grads will be printed automatically when detected.", DeprecationWarning)
self.reload_dataloaders_every_epoch = reload_dataloaders_every_epoch
self.auto_lr_find = auto_lr_find
self.replace_sampler_ddp = replace_sampler_ddp
self.truncated_bptt_steps = truncated_bptt_steps
self.resume_from_checkpoint = resume_from_checkpoint
self.terminate_on_nan = terminate_on_nan
self.shown_warnings = set()
self.fast_dev_run = fast_dev_run
if self.fast_dev_run:
self.num_sanity_val_steps = 0
self.max_epochs = 1
log.info('Running in fast_dev_run mode: will run a full train,'
' val and test loop using a single batch')
# set default save path if user didn't provide one
self.default_root_dir = default_root_dir
# Backward compatibility, TODO: remove in v0.8.0
if default_save_path is not None:
self.default_root_dir = default_save_path
if self.default_root_dir is None:
self.default_root_dir = os.getcwd()
# training bookkeeping
self.total_batch_idx = 0
self.running_loss = TensorRunningAccum(window_length=20)
self.batch_idx = 0
self.tqdm_metrics = {}
self.callback_metrics = {}
self.num_val_batches = 0
self.num_training_batches = 0
self.num_test_batches = 0
self.train_dataloader = None
self.test_dataloaders = None
self.val_dataloaders = None
# training state
self.model = None
self.testing = False
self.disable_validation = False
self.lr_schedulers = []
self.optimizers = None
self.optimizer_frequencies = []
self.global_step = 0
self.current_epoch = 0
self.total_batches = 0
self.interrupted = False
# configure logger
self.configure_logger(logger)
# configure profiler
if profiler is True:
profiler = SimpleProfiler()
self.profiler = profiler or PassThroughProfiler()
# configure early stop callback
# creates a default one if none passed in
self.configure_early_stopping(early_stop_callback)
# configure checkpoint callback
self.checkpoint_callback = checkpoint_callback
self.weights_save_path = weights_save_path
# accumulated grads
self.accumulate_grad_batches = accumulate_grad_batches
self.configure_accumulated_gradients(accumulate_grad_batches)
# for gpus allow int, string and gpu list
if auto_select_gpus and isinstance(gpus, int):
self.gpus = pick_multiple_gpus(gpus)
else:
self.gpus = gpus
self.data_parallel_device_ids = parse_gpu_ids(self.gpus)
self.root_gpu = determine_root_gpu_device(self.data_parallel_device_ids)
self.root_device = torch.device("cpu")
# tpu state flags
self.use_tpu = False
self.tpu_local_core_rank = None
self.tpu_global_core_rank = None
# distributed backend choice
self.distributed_backend = distributed_backend
self.set_distributed_mode(distributed_backend)
# override dist backend when using tpus
if self.on_tpu:
self.init_tpu()
self.current_tpu_idx = None
# init flags for SLURM+ddp to work
self.proc_rank = 0
self.world_size = 1
self.node_rank = 0
self.configure_slurm_ddp(self.num_nodes)
# nvidia setup
self.set_nvidia_flags(self.is_slurm_managing_tasks, self.data_parallel_device_ids)
# can't init progress bar here because starting a new process
# means the progress_bar won't survive pickling
# backward compatibility
if show_progress_bar is not None:
self.show_progress_bar = show_progress_bar
# logging
self.log_save_interval = log_save_interval
self.val_check_interval = val_check_interval
# backward compatibility
if add_row_log_interval is not None:
rank_zero_warn("`add_row_log_interval` has renamed to `row_log_interval` since v0.5.0"
" and this method will be removed in v0.8.0", DeprecationWarning)
if not row_log_interval: # in case you did not set the proper value
row_log_interval = add_row_log_interval
self.row_log_interval = row_log_interval
# how much of the data to use
self.overfit_pct = overfit_pct
self.determine_data_use_amount(train_percent_check, val_percent_check,
test_percent_check, overfit_pct)
# 16 bit mixed precision training using apex
self.amp_level = amp_level
self.precision = precision
# Backward compatibility, TODO: remove in v0.9.0
if use_amp is not None:
rank_zero_warn("`use_amp` has been replaced by `precision` since v0.7.0"
" and this argument will be removed in v0.9.0", DeprecationWarning)
self.precision = 16 if use_amp else 32
assert self.precision in (16, 32), 'only 32 or 16 bit precision supported'
if self.precision == 16 and self.num_tpu_cores is None:
use_amp = True
self.init_amp(use_amp)
# Callback system
self.on_init_end()
@property
def slurm_job_id(self) -> int:
try:
job_id = os.environ['SLURM_JOB_ID']
job_id = int(job_id)
except Exception:
job_id = None
return job_id
@classmethod
def default_attributes(cls):
init_signature = inspect.signature(Trainer)
args = {}
for param_name in init_signature.parameters:
value = init_signature.parameters[param_name].default
args[param_name] = value
return args
@classmethod
def get_init_arguments_and_types(cls) -> List[Tuple[str, Tuple, Any]]:
r"""Scans the Trainer signature and returns argument names, types and default values.
Returns:
List with tuples of 3 values:
(argument name, set with argument types, argument default value).
Examples:
>>> args = Trainer.get_init_arguments_and_types()
>>> import pprint
>>> pprint.pprint(sorted(args)) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
[('accumulate_grad_batches',
(<class 'int'>, typing.Dict[int, int], typing.List[list]),
1),
...
('callbacks',
(typing.List[pytorch_lightning.callbacks.base.Callback],
<class 'NoneType'>),
None),
('check_val_every_n_epoch', (<class 'int'>,), 1),
...
('max_epochs', (<class 'int'>,), 1000),
...
('precision', (<class 'int'>,), 32),
('print_nan_grads', (<class 'bool'>,), False),
('process_position', (<class 'int'>,), 0),
('profiler',
(<class 'pytorch_lightning.profiler.profilers.BaseProfiler'>,
<class 'NoneType'>),
None),
...
"""
trainer_default_params = inspect.signature(cls).parameters
name_type_default = []
for arg in trainer_default_params:
arg_type = trainer_default_params[arg].annotation
arg_default = trainer_default_params[arg].default
try:
arg_types = tuple(arg_type.__args__)
except AttributeError:
arg_types = (arg_type,)
name_type_default.append((arg, arg_types, arg_default))
return name_type_default
@classmethod
def get_deprecated_arg_names(cls) -> List:
"""Returns a list with deprecated Trainer arguments."""
depr_arg_names = []
for name, val in cls.__dict__.items():
if name.startswith('DEPRECATED') and isinstance(val, (tuple, list)):
depr_arg_names.extend(val)
return depr_arg_names
@classmethod
def add_argparse_args(cls, parent_parser: ArgumentParser) -> ArgumentParser:
r"""Extends existing argparse by default `Trainer` attributes.
Args:
parent_parser:
The custom cli arguments parser, which will be extended by
the Trainer default arguments.
Only arguments of the allowed types (str, float, int, bool) will
extend the `parent_parser`.
"""
parser = ArgumentParser(parents=[parent_parser], add_help=False, )
depr_arg_names = cls.get_deprecated_arg_names()
allowed_types = (str, float, int, bool)
# TODO: get "help" from docstring :)
for arg, arg_types, arg_default in (at for at in cls.get_init_arguments_and_types()
if at[0] not in depr_arg_names):
for allowed_type in (at for at in allowed_types if at in arg_types):
if allowed_type is bool:  # `allowed_type` is a type object; `bool('False')` is truthy, so parse bool flags via strtobool
def allowed_type(x):
return bool(distutils.util.strtobool(x))
parser.add_argument(
f'--{arg}',
default=arg_default,
type=allowed_type,
dest=arg,
help='autogenerated by pl.Trainer'
)
break
return parser
@classmethod
def from_argparse_args(cls, args):
params = vars(args)
return cls(**params)
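# A short sketch of how the two classmethods above are typically combined; the
# CLI flag names come from whatever the Trainer signature exposes (e.g. --max_epochs):
#
#   parser = ArgumentParser()
#   parser = Trainer.add_argparse_args(parser)
#   args = parser.parse_args(['--max_epochs', '5'])
#   trainer = Trainer.from_argparse_args(args)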
@property
def num_gpus(self) -> int:
gpus = self.data_parallel_device_ids
if gpus is None:
return 0
return len(gpus)
@property
def data_parallel(self) -> bool:
return self.use_dp or self.use_ddp or self.use_ddp2
@property
def training_tqdm_dict(self) -> dict:
"""Read-only for tqdm metrics.
:return:
"""
ref_model = self.model if not self.data_parallel else self.model.module
return dict(**ref_model.get_tqdm_dict(), **self.tqdm_metrics)
@property
def tng_tqdm_dic(self):
"""Read-only for tqdm metrics.
.. warning:: .. deprecated:: 0.5.0
Use `training_tqdm_dict` instead. Will remove 0.8.0.
"""
rank_zero_warn("`tng_tqdm_dic` has renamed to `training_tqdm_dict` since v0.5.0"
" and this method will be removed in v0.8.0", DeprecationWarning)
return self.training_tqdm_dict
# -----------------------------
# MODEL TRAINING
# -----------------------------
def fit(
self,
model: LightningModule,
train_dataloader: Optional[DataLoader] = None,
val_dataloaders: Optional[DataLoader] = None
):
r"""
Runs the full optimization routine.
Args:
model: Model to fit.
train_dataloader: A PyTorch
DataLoader with training samples. If the model has
a predefined train_dataloader method this will be skipped.
val_dataloaders: Either a single
PyTorch DataLoader or a list of them, specifying validation samples.
If the model has a predefined val_dataloaders method this will be skipped.
Example::
# Option 1,
# Define the train_dataloader() and val_dataloader() fxs
# in the lightningModule
# RECOMMENDED FOR MOST RESEARCH AND APPLICATIONS TO MAINTAIN READABILITY
trainer = Trainer()
model = LightningModule()
trainer.fit(model)
# Option 2
# in production cases we might want to pass different datasets to the same model
# Recommended for PRODUCTION SYSTEMS
train, val = DataLoader(...), DataLoader(...)
trainer = Trainer()
model = LightningModule()
trainer.fit(model, train_dataloader=train, val_dataloaders=val)
# Option 1 & 2 can be mixed, for example the training set can be
# defined as part of the model, and validation can then be fed to .fit()
"""
# bind logger and other properties
model.logger = self.logger
self.copy_trainer_model_properties(model)
# set up the passed in dataloaders (if needed)
self.__attach_dataloaders(model, train_dataloader, val_dataloaders)
# check that model is configured correctly
self.check_model_configuration(model)
# download the data and do whatever transforms we need
# do before any spawn calls so that the model can assign properties
# only on proc 0 because no spawn has happened yet
model.prepare_data()
# Run learning rate finder:
if self.auto_lr_find:
self._run_lr_finder_internally(model)
# route to appropriate start method
# when using multi-node or DDP within a node start each module in a separate process
if self.use_ddp2:
task = int(os.environ['SLURM_LOCALID'])
self.ddp_train(task, model)
elif self.use_ddp:
if self.is_slurm_managing_tasks:
task = int(os.environ['SLURM_LOCALID'])
self.ddp_train(task, model)
else:
self.__set_random_port()
# track for predict
self.model = model
# train
mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model,))
# load weights if not interrupted
self.load_spawn_weights(model)
self.model = model
# 1 gpu or dp option triggers training using DP module
# easier to avoid NCCL issues
elif self.use_dp:
self.dp_train(model)
elif self.single_gpu:
self.single_gpu_train(model)
elif self.use_tpu: # pragma: no-cover
log.info(f'training on {self.num_tpu_cores} TPU cores')
# COLAB_GPU is an env var available by default in Colab environments.
start_method = 'fork' if os.getenv('COLAB_GPU') else 'spawn'
# track for predict
self.model = model
# train
xmp.spawn(self.tpu_train, args=(model,), nprocs=self.num_tpu_cores, start_method=start_method)
# load weights if not interrupted
self.load_spawn_weights(model)
self.model = model
# ON CPU
else:
# run through amp wrapper
if self.use_amp:
raise MisconfigurationException('amp + cpu is not supported. Please use a GPU option')
# CHOOSE OPTIMIZER
# allow for lr schedulers as well
self.optimizers, self.lr_schedulers, self.optimizer_frequencies = self.init_optimizers(model)
self.run_pretrain_routine(model)
# return 1 when finished
# used for testing or when we need to know that training succeeded
return 1
def __set_random_port(self):
"""
When running DDP NOT managed by SLURM, the ports might collide
:return:
"""
try:
default_port = os.environ['MASTER_PORT']
except Exception:
import random
default_port = random.randint(10000, 19000)
os.environ['MASTER_PORT'] = str(default_port)
def __attach_dataloaders(self, model, train_dataloader=None, val_dataloaders=None, test_dataloaders=None):
# when dataloader is passed via fit, patch the train_dataloader
# functions to overwrite with these implementations
if train_dataloader is not None:
model.train_dataloader = _PatchDataLoader(train_dataloader)
if val_dataloaders is not None:
model.val_dataloader = _PatchDataLoader(val_dataloaders)
if test_dataloaders is not None:
model.test_dataloader = _PatchDataLoader(test_dataloaders)
def run_pretrain_routine(self, model: LightningModule):
"""Sanity check a few things before starting actual training.
Args:
model: The model to run sanity test on.
"""
ref_model = model
if self.data_parallel:
ref_model = model.module
# give model convenience properties
ref_model.trainer = self
# set local properties on the model
self.copy_trainer_model_properties(ref_model)
# log hyper-parameters
if self.logger is not None:
# save exp to get started
if hasattr(ref_model, "hparams"):
self.logger.log_hyperparams(ref_model.hparams)
self.logger.save()
if self.use_ddp or self.use_ddp2:
torch_distrib.barrier()
# wait for all models to restore weights
if self.on_tpu and XLA_AVAILABLE:
# wait for all processes to catch up
torch_xla.core.xla_model.rendezvous("pl.Trainer.run_pretrain_routine")
# register auto-resubmit when on SLURM
self.register_slurm_signal_handlers()
# print model summary
# TODO: remove self.testing condition because model.summarize() is wiping out the weights
if self.proc_rank == 0 and self.weights_summary is not None and not self.testing:
if self.weights_summary in ['full', 'top']:
ref_model.summarize(mode=self.weights_summary)
else:
raise MisconfigurationException("weights_summary can be None, 'full' or 'top'")
# track model now.
# if cluster resets state, the model will update with the saved weights
self.model = model
# set up checkpoint callback
self.configure_checkpoint_callback()
# restore training and model before hpc call
self.restore_weights(model)
# when testing requested only run test and return
if self.testing:
# only load test dataloader for testing
# self.reset_test_dataloader(ref_model)
self.run_evaluation(test_mode=True)
return
# check if we should run validation during training
self.disable_validation = not (self.is_overriden('validation_step') and self.val_percent_check > 0) \
and not self.fast_dev_run
# run tiny validation (if validation defined)
# to make sure program won't crash during val
ref_model.on_sanity_check_start()
if not self.disable_validation and self.num_sanity_val_steps > 0:
self.reset_val_dataloader(ref_model)
# init progress bars for validation sanity check
pbar = tqdm(desc='Validation sanity check',
total=self.num_sanity_val_steps * len(self.val_dataloaders),
leave=False, position=2 * self.process_position,
disable=not self.progress_bar_refresh_rate, dynamic_ncols=True)
self.main_progress_bar = pbar
# dummy validation progress bar
self.val_progress_bar = tqdm(disable=True)
eval_results = self._evaluate(model,
self.val_dataloaders,
self.num_sanity_val_steps,
False)
_, _, _, callback_metrics, _ = self.process_output(eval_results)
# close progress bars
self.main_progress_bar.close()
self.val_progress_bar.close()
# verify that early stop has conditioned on a metric that exists
if self.enable_early_stop:
self.early_stop_callback._validate_condition_metric(callback_metrics)
# init progress bar
pbar = tqdm(leave=True, position=2 * self.process_position,
disable=not self.show_progress_bar, dynamic_ncols=True,
file=sys.stdout, smoothing=0)
self.main_progress_bar = pbar
# clear cache before training
if self.on_gpu:
torch.cuda.empty_cache()
# CORE TRAINING LOOP
self.train()
def test(self, model: Optional[LightningModule] = None, test_dataloaders: Optional[DataLoader] = None):
r"""
Separates from fit to make sure you never run on your test set until you want to.
Args:
model: The model to test.
test_dataloaders: Either a single
Pytorch Dataloader or a list of them, specifying validation samples.
Example::
# Option 1
# run test after fitting
test = DataLoader(...)
trainer = Trainer()
model = LightningModule()
trainer.fit(model)
trainer.test(test_dataloaders=test)
# Option 2
# run test from a loaded model
test = DataLoader(...)
model = LightningModule.load_from_checkpoint('path/to/checkpoint.ckpt')
trainer = Trainer()
trainer.test(model, test_dataloaders=test)
"""
self.testing = True
if test_dataloaders is not None:
if model:
self.__attach_dataloaders(model, test_dataloaders=test_dataloaders)
else:
self.__attach_dataloaders(self.model, test_dataloaders=test_dataloaders)
# give proper warnings if user only passed in loader without hooks
self.check_testing_model_configuration(model if model else self.model)
if model is not None:
self.model = model
self.fit(model)
elif self.use_ddp or self.use_tpu: # pragma: no-cover
# attempt to load weights from a spawn
path = os.path.join(self.default_root_dir, '__temp_weight_ddp_end.ckpt')
test_model = self.model
if os.path.exists(path):
test_model = self.load_spawn_weights(self.model)
self.fit(test_model)
else:
self.run_evaluation(test_mode=True)
self.testing = False
def check_model_configuration(self, model: LightningModule):
r"""
Checks that the model is configured correctly before training is started.
Args:
model: The model to test.
"""
# Check training_step, train_dataloader, configure_optimizer methods
if not self.is_overriden('training_step', model):
raise MisconfigurationException(
'No `training_step()` method defined. Lightning `Trainer` expects as a minimum a'
' `training_step()`, `train_dataloader()` and `configure_optimizers()` to be defined.')
if not self.is_overriden('train_dataloader', model):
raise MisconfigurationException(
'No `train_dataloader()` method defined. Lightning `Trainer` expects as a minimum a'
' `training_step()`, `train_dataloader()` and `configure_optimizers()` to be defined.')
if not self.is_overriden('configure_optimizers', model):
raise MisconfigurationException(
'No `configure_optimizers()` method defined. Lightning `Trainer` expects as a minimum a'
' `training_step()`, `train_dataloader()` and `configure_optimizers()` to be defined.')
# Check val_dataloader, validation_step and validation_epoch_end
if self.is_overriden('val_dataloader', model):
if not self.is_overriden('validation_step', model):
raise MisconfigurationException('You have passed in a `val_dataloader()`'
' but have not defined `validation_step()`.')
else:
if not self.is_overriden('validation_epoch_end', model):
rank_zero_warn(
'You have defined a `val_dataloader()` and have defined a `validation_step()`,'
' you may also want to define `validation_epoch_end()` for accumulating stats.',
RuntimeWarning
)
else:
if self.is_overriden('validation_step', model):
raise MisconfigurationException('You have defined `validation_step()`,'
' but have not passed in a val_dataloader().')
# Check test_dataloader, test_step and test_epoch_end
if self.is_overriden('test_dataloader', model):
if not self.is_overriden('test_step', model):
raise MisconfigurationException('You have passed in a `test_dataloader()`'
' but have not defined `test_step()`.')
else:
if not self.is_overriden('test_epoch_end', model):
rank_zero_warn(
'You have defined a `test_dataloader()` and have defined a `test_step()`, you may also want to'
' define `test_epoch_end()` for accumulating stats.', RuntimeWarning
)
def check_testing_model_configuration(self, model: LightningModule):
has_test_step = self.is_overriden('test_step', model)
has_test_epoch_end = self.is_overriden('test_epoch_end', model)
gave_test_loader = hasattr(model, 'test_dataloader') and model.test_dataloader()
if gave_test_loader and not has_test_step:
raise MisconfigurationException('You passed in a `test_dataloader` but did not implement `test_step()`')
if has_test_step and not gave_test_loader:
raise MisconfigurationException('You defined `test_step()` but did not implement'
                                            ' `test_dataloader` nor passed in `.fit(test_dataloaders)`.')
if has_test_step and gave_test_loader and not has_test_epoch_end:
rank_zero_warn(
'You passed in a `test_dataloader` and have defined a `test_step()`, you may also want to'
' define `test_epoch_end()` for accumulating stats.', RuntimeWarning
)
class _PatchDataLoader(object):
r"""
Callable object for patching dataloaders passed into trainer.fit().
Use this class to override model.*_dataloader() and be pickle-compatible.
Args:
dataloader: Dataloader object to return when called.
"""
def __init__(self, dataloader: Union[List[DataLoader], DataLoader]):
self.dataloader = dataloader
# cannot pickle __code__ so cannot verify if PatchDataloader
# exists which shows dataloader methods have been overwritten.
# so, we hack it by using the string representation
self.patch_loader_code = str(self.__call__.__code__)
def __call__(self) -> Union[List[DataLoader], DataLoader]:
return self.dataloader
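# Rough illustration of how the trainer applies this wrapper (see
# __attach_dataloaders above): assigning
#   model.train_dataloader = _PatchDataLoader(my_loader)
# makes model.train_dataloader() return `my_loader`, while keeping the attribute
# picklable for the spawn-based start methods (`my_loader` is a hypothetical DataLoader).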
| 39.856874 | 119 | 0.6293 |
4a2706f92aedc8a27df4f80c14d6dbda08fe52fb | 2,235 | py | Python | kernel/examples/handler/component/feature/vert_soften.py | rinceyuan/WeFe | 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | [
"Apache-2.0"
] | 39 | 2021-10-12T01:43:27.000Z | 2022-03-28T04:46:35.000Z | kernel/examples/handler/component/feature/vert_soften.py | rinceyuan/WeFe | 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | [
"Apache-2.0"
] | 6 | 2021-10-14T02:11:47.000Z | 2022-03-23T02:41:50.000Z | kernel/examples/handler/component/feature/vert_soften.py | rinceyuan/WeFe | 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | [
"Apache-2.0"
] | 10 | 2021-10-14T09:36:03.000Z | 2022-02-10T11:05:12.000Z | # Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.python.utils import log_utils
from kernel.components.feature.soften.vertsoften.param import VertFeatureSoftenParam
from kernel.examples.handler.component.component_base import Component
from kernel.examples.handler.interface import Input
from kernel.examples.handler.interface import Output
LOGGER = log_utils.get_logger()
class VertSoftenParameter(VertFeatureSoftenParam):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def check(self):
return True
class VertSoften(Component, VertSoftenParameter):
def __init__(self, **kwargs):
Component.__init__(self, **kwargs)
# print (self.name)
LOGGER.debug(f"{self.name} component created")
new_kwargs = self.erase_component_base_param(**kwargs)
VertSoftenParameter.__init__(self, **new_kwargs)
self.input = Input(self.name)
self.output = Output(self.name, data_type='single')
self._module_name = "VertSoften"
self._param_name = "VertFeatureSoftenParam"
| 37.25 | 84 | 0.746309 |
4a2707312c8f112f66616bab1a593625aac53755 | 3,091 | py | Python | openGaussBase/testcase/KEYWORDS/current_user/Opengauss_Function_Keyword_Current_user_Case0020.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/KEYWORDS/current_user/Opengauss_Function_Keyword_Current_user_Case0020.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/KEYWORDS/current_user/Opengauss_Function_Keyword_Current_user_Case0020.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
'''
#-- @testpoint: the openGauss keyword current_user (reserved) used as a directory object name (by default only the initial user may create directories)
'''
import unittest
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
logger = Logger()
commonsh = CommonSH('dbuser')
constant = Constant()
class Hostname(unittest.TestCase):
def setUp(self):
logger.info("------------------------ Opengauss_Function_KeyWord_Current_user_Case0020 开始执行--------------------------")
def test_current_user_1(self):
logger.info("------------------------关键字作为目录对象名不带引号-合理报错--------------------------")
SqlMdg = commonsh.execut_db_sql('''create directory current_user as '/tmp/';
drop directory current_user;''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
logger.info("------------------------关键字作为目录对象名带双引号-成功--------------------------")
SqlMdg = commonsh.execut_db_sql('create directory \\\"current_user\\\" as \'/tmp/\';'
'drop directory \\\"current_user\\\"; ')
logger.info(SqlMdg)
self.assertIn(constant.CREATE_DIRECTORY_SUCCESS_MSG, SqlMdg)
self.assertIn(constant.DROP_DIRECTORY_SUCCESS_MSG, SqlMdg)
logger.info(SqlMdg)
self.assertIn(constant.CREATE_DIRECTORY_SUCCESS_MSG, SqlMdg)
self.assertIn(constant.DROP_DIRECTORY_SUCCESS_MSG, SqlMdg)
logger.info("------------------------关键字作为目录对象名带单引号 - 合理报错--------------------------")
SqlMdg = commonsh.execut_db_sql('''drop directory if exists 'current_user';''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
SqlMdg = commonsh.execut_db_sql(''' create directory 'current_user' as '/tmp/';''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
logger.info("------------------------关键字作为目录对象名带反引号 - 合理报错--------------------------")
SqlMdg = commonsh.execut_db_sql('''drop directory if exists \`current_user\`;''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
SqlMdg = commonsh.execut_db_sql('''create directory \`current_user\` as '/tmp/';''')
logger.info(SqlMdg)
self.assertIn(constant.SYNTAX_ERROR_MSG, SqlMdg)
def tearDown(self):
logger.info('------------------------ Opengauss_Function_KeyWord_Current_user_Case0020 test finished --------------------------')
| 46.134328 | 128 | 0.621805 |
4a270775a7083975eb4cdf9f0584b7c410018d09 | 3,531 | py | Python | tests/test_utility/test_file_utils/test.py | ikegawa-koshi/avalon | 09ccad29f953341078e767053646f41c8c800237 | [
"Apache-2.0"
] | 127 | 2019-10-25T08:43:26.000Z | 2022-03-20T15:33:32.000Z | tests/test_utility/test_file_utils/test.py | ikegawa-koshi/avalon | 09ccad29f953341078e767053646f41c8c800237 | [
"Apache-2.0"
] | 275 | 2019-10-24T23:36:21.000Z | 2022-01-24T20:38:07.000Z | tests/test_utility/test_file_utils/test.py | ikegawa-koshi/avalon | 09ccad29f953341078e767053646f41c8c800237 | [
"Apache-2.0"
] | 110 | 2019-10-30T07:09:25.000Z | 2022-01-28T09:40:44.000Z | # Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import os
import unittest
from utility.file_utils import (
write_result_data_to_json_file,
find_file_in_paths,
read_json_file,
)
FILE_PATH = "./test_file_utils"
class FileUtilsTest(unittest.TestCase):
def test_find_file_in_paths(self):
"""Tests to verify file_in_path(filename, search_paths) function
"""
self.assertEqual(FILE_PATH + "/path_test_file",
find_file_in_paths("path_test_file", [FILE_PATH]))
self.assertEqual(FILE_PATH + "/path_test_file",
find_file_in_paths("path_test_file",
["./", FILE_PATH]))
pwd = os.getcwd()
os.chdir(FILE_PATH)
self.assertEqual("./path_test_file",
find_file_in_paths("./path_test_file", ["./"]))
self.assertEqual("./path_test_file",
find_file_in_paths("path_test_file", ["./"]))
os.chdir(pwd)
self.assertRaises(FileNotFoundError, find_file_in_paths,
"path_test_file", ["./"])
def test_read_json_file(self):
"""Tests to verify read_json_file(input_file,data_dir) function
"""
input_json = read_json_file(
"sample1.json", [FILE_PATH]) # positive case
self.assertEqual(input_json, '{"field1": 1,"field2": 2}')
input_json = read_json_file(
"sample2.json", [FILE_PATH]) # positive case
self.assertEqual(input_json, '{"field1":1,"field2":2}')
input_json = read_json_file(
"sample3.json", [FILE_PATH]) # positive case
self.assertEqual(input_json,
'{1:"one",2:"two",3:["one","two","three"]}')
def test_write_result_data_to_json_file(self):
"""Tests to verify function
write_result_data_to_json_file(file_name,input_data, data_dir ='./')
"""
input_data = '{"result":1,"field2":2}'
file_name = "write_sample.json"
write_result_data_to_json_file(file_name, input_data)
read_json = read_json_file(
"write_sample.json", ["./"]) # with extension case
self.assertEqual('{"Result": 1}', read_json)
try:
os.remove(file_name)
except OSError:
pass
file_name = "write_sample"
write_result_data_to_json_file(file_name, input_data)
read_json = read_json_file(
"write_sample.json", ["./"]) # positive case
self.assertEqual('{"Result": 1}', read_json)
try:
os.remove(file_name + ".json")
except OSError:
pass
input_data = '{"field1":1,"field2":2}' # No attribute 'result'
file_name = "write_sample.json"
self.assertRaises(ValueError, write_result_data_to_json_file,
file_name, input_data)
| 38.802198 | 80 | 0.601813 |
4a2707df6e8caed8caf622b3ac46cc792f1ee83e | 641 | py | Python | batch3/outputs/lensing-ommh2.py | sjoudaki/CosmoJBD | 3c1d029b74034b92cb2974de15e4c18637a5277e | [
"MIT"
] | null | null | null | batch3/outputs/lensing-ommh2.py | sjoudaki/CosmoJBD | 3c1d029b74034b92cb2974de15e4c18637a5277e | [
"MIT"
] | null | null | null | batch3/outputs/lensing-ommh2.py | sjoudaki/CosmoJBD | 3c1d029b74034b92cb2974de15e4c18637a5277e | [
"MIT"
] | null | null | null | import planckStyle as s
g = s.getSinglePlotter()
g.settings.solid_colors = [('#8CD3F5', '#006FED'), ('#F7BAA6', '#E03424'), 'g', 'cadetblue', 'olive',
'darkcyan']
roots = []
roots.append('base_lensing_lenspriors')
roots.append('base_plikHM_TE_lowE')
roots.append('base_plikHM_TT_lowl_lowE')
roots.append('base_plikHM_TTTEEE_lowl_lowE_lensing')
g.plot_2d(roots, [u'omegamh2', u's8omegamp25'], filled=True, shaded=False)
g.add_legend(
[s.planck + ' ' + s.lensonly, s.datalabel[s.defdata_TE], s.planckTT, s.planckall + '+lensing'],
colored_text=True, legend_loc='lower right', align_right=True)
g.export()
| 35.611111 | 101 | 0.692668 |
4a270893820360696545a5a30d78504c4a07b342 | 153 | py | Python | webapp.py | RouSage/diploma-webapp | 7fc7f477a9b45f85223a2aa72fdf9a7f3f567b26 | [
"MIT"
] | null | null | null | webapp.py | RouSage/diploma-webapp | 7fc7f477a9b45f85223a2aa72fdf9a7f3f567b26 | [
"MIT"
] | 4 | 2021-03-30T15:28:13.000Z | 2022-03-11T23:44:28.000Z | webapp.py | RouSage/diploma-webapp | 7fc7f477a9b45f85223a2aa72fdf9a7f3f567b26 | [
"MIT"
] | null | null | null | from app import app, db, cli
from app.models import Image
@app.shell_context_processor
def make_shell_context():
return {'db': db, 'Image': Image}
| 19.125 | 37 | 0.732026 |
4a270a7d1ad22dc927894adfcfb32ba3245ac343 | 12,789 | py | Python | dipy/reconst/tests/test_shm.py | ChantalTax/dipy | da656ca630934a79e5eabd4aee64f8f0ae05bf95 | [
"BSD-3-Clause"
] | null | null | null | dipy/reconst/tests/test_shm.py | ChantalTax/dipy | da656ca630934a79e5eabd4aee64f8f0ae05bf95 | [
"BSD-3-Clause"
] | null | null | null | dipy/reconst/tests/test_shm.py | ChantalTax/dipy | da656ca630934a79e5eabd4aee64f8f0ae05bf95 | [
"BSD-3-Clause"
] | null | null | null | """Test spherical harmonic models and the tools associated with those models"""
import numpy as np
import numpy.linalg as npl
from nose.tools import assert_equal, assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_array_almost_equal
from dipy.core.sphere import hemi_icosahedron
from dipy.core.gradients import gradient_table
from dipy.sims.voxel import single_tensor
from dipy.reconst.peaks import peak_directions
from dipy.reconst.shm import sf_to_sh, sh_to_sf
from dipy.reconst.interpolate import NearestNeighborInterpolator
from dipy.sims.voxel import multi_tensor_odf
from dipy.data import mrtrix_spherical_functions
from dipy.reconst import odf
from dipy.reconst.shm import (real_sph_harm, real_sym_sh_basis,
real_sym_sh_mrtrix, sph_harm_ind_list,
OpdtModel, normalize_data, hat, lcr_matrix,
smooth_pinv, bootstrap_data_array,
bootstrap_data_voxel, ResidualBootstrapWrapper,
CsaOdfModel, QballModel, SphHarmFit)
def test_sph_harm_ind_list():
m_list, n_list = sph_harm_ind_list(8)
assert_equal(m_list.shape, n_list.shape)
assert_equal(m_list.shape, (45,))
assert_true(np.all(np.abs(m_list) <= n_list))
assert_array_equal(n_list % 2, 0)
assert_raises(ValueError, sph_harm_ind_list, 1)
def test_real_sph_harm():
# Tests derived from tables in
# http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
# where real spherical harmonic $Y^m_n$ is defined to be:
# Real($Y^m_n$) * sqrt(2) if m > 0
# $Y^m_n$ if m == 0
# Imag($Y^m_n$) * sqrt(2) if m < 0
rsh = real_sph_harm
pi = np.pi
exp = np.exp
sqrt = np.sqrt
sin = np.sin
cos = np.cos
assert_array_almost_equal(rsh(0, 0, 0, 0),
0.5 / sqrt(pi))
assert_array_almost_equal(rsh(-2, 2, pi / 5, pi / 3),
0.25 * sqrt(15. / (2. * pi)) *
(sin(pi / 5.)) ** 2. * cos(0 + 2. * pi / 3) *
sqrt(2))
assert_array_almost_equal(rsh(2, 2, pi / 5, pi / 3),
-1 * 0.25 * sqrt(15. / (2. * pi)) *
(sin(pi / 5.)) ** 2. * sin(0 - 2. * pi / 3) *
sqrt(2))
assert_array_almost_equal(rsh(-2, 2, pi / 2, pi),
0.25 * sqrt(15 / (2. * pi)) *
cos(2. * pi) * sin(pi / 2.) ** 2. * sqrt(2))
assert_array_almost_equal(rsh(2, 4, pi / 3., pi / 4.),
-1 * (3. / 8.) * sqrt(5. / (2. * pi)) *
sin(0 - 2. * pi / 4.) *
sin(pi / 3.) ** 2. *
(7. * cos(pi / 3.) ** 2. - 1) * sqrt(2))
assert_array_almost_equal(rsh(-4, 4, pi / 6., pi / 8.),
(3. / 16.) * sqrt(35. / (2. * pi)) *
cos(0 + 4. * pi / 8.) * sin(pi / 6.) ** 4. *
sqrt(2))
assert_array_almost_equal(rsh(4, 4, pi / 6., pi / 8.),
-1 * (3. / 16.) * sqrt(35. / (2. * pi)) *
sin(0 - 4. * pi / 8.) * sin(pi / 6.) ** 4. *
sqrt(2))
aa = np.ones((3, 1, 1, 1))
bb = np.ones((1, 4, 1, 1))
cc = np.ones((1, 1, 5, 1))
dd = np.ones((1, 1, 1, 6))
assert_equal(rsh(aa, bb, cc, dd).shape, (3, 4, 5, 6))
def test_real_sym_sh_mrtrix():
coef, expected, sphere = mrtrix_spherical_functions()
basis, m, n = real_sym_sh_mrtrix(8, sphere.theta, sphere.phi)
func = np.dot(coef, basis.T)
assert_array_almost_equal(func, expected, 4)
def test_real_sym_sh_basis():
# This test should do for now
# The mrtrix basis should be the same as re-ordering and re-scaling the
# fibernav basis
new_order = [0, 5, 4, 3, 2, 1, 14, 13, 12, 11, 10, 9, 8, 7, 6]
sphere = hemi_icosahedron.subdivide(2)
basis, m, n = real_sym_sh_mrtrix(4, sphere.theta, sphere.phi)
expected = basis[:, new_order]
expected *= np.where(m == 0, 1., np.sqrt(2))
fibernav_basis, m, n = real_sym_sh_basis(4, sphere.theta, sphere.phi)
assert_array_almost_equal(fibernav_basis, expected)
def test_smooth_pinv():
hemi = hemi_icosahedron.subdivide(2)
m, n = sph_harm_ind_list(4)
B = real_sph_harm(m, n, hemi.theta[:, None], hemi.phi[:, None])
L = np.zeros(len(m))
C = smooth_pinv(B, L)
D = np.dot(npl.inv(np.dot(B.T, B)), B.T)
assert_array_almost_equal(C, D)
L = n * (n + 1) * .05
C = smooth_pinv(B, L)
L = np.diag(L)
D = np.dot(npl.inv(np.dot(B.T, B) + L * L), B.T)
assert_array_almost_equal(C, D)
L = np.arange(len(n)) * .05
C = smooth_pinv(B, L)
L = np.diag(L)
D = np.dot(npl.inv(np.dot(B.T, B) + L * L), B.T)
assert_array_almost_equal(C, D)
def test_normalize_data():
sig = np.arange(1, 66)[::-1]
where_b0 = np.zeros(65, 'bool')
where_b0[0] = True
d = normalize_data(sig, where_b0, 1)
assert_raises(ValueError, normalize_data, sig, where_b0, out=sig)
norm_sig = normalize_data(sig, where_b0, min_signal=1)
assert_array_almost_equal(norm_sig, sig / 65.)
norm_sig = normalize_data(sig, where_b0, min_signal=5)
assert_array_almost_equal(norm_sig[-5:], 5 / 65.)
where_b0[[0, 1]] = [True, True]
norm_sig = normalize_data(sig, where_b0, min_signal=1)
assert_array_almost_equal(norm_sig, sig / 64.5)
norm_sig = normalize_data(sig, where_b0, min_signal=5)
assert_array_almost_equal(norm_sig[-5:], 5 / 64.5)
sig = sig * np.ones((2, 3, 1))
where_b0[[0, 1]] = [True, False]
norm_sig = normalize_data(sig, where_b0, min_signal=1)
assert_array_almost_equal(norm_sig, sig / 65.)
norm_sig = normalize_data(sig, where_b0, min_signal=5)
assert_array_almost_equal(norm_sig[..., -5:], 5 / 65.)
where_b0[[0, 1]] = [True, True]
norm_sig = normalize_data(sig, where_b0, min_signal=1)
assert_array_almost_equal(norm_sig, sig / 64.5)
norm_sig = normalize_data(sig, where_b0, min_signal=5)
assert_array_almost_equal(norm_sig[..., -5:], 5 / 64.5)
def make_fake_signal():
hemisphere = hemi_icosahedron.subdivide(2)
bvecs = np.concatenate(([[0, 0, 0]], hemisphere.vertices))
bvals = np.zeros(len(bvecs)) + 2000
bvals[0] = 0
gtab = gradient_table(bvals, bvecs)
evals = np.array([[2.1, .2, .2], [.2, 2.1, .2]]) * 10 ** -3
evecs0 = np.eye(3)
sq3 = np.sqrt(3) / 2.
evecs1 = np.array([[sq3, .5, 0],
[.5, sq3, 0],
[0, 0, 1.]])
evecs1 = evecs0
a = evecs0[0]
b = evecs1[1]
S1 = single_tensor(gtab, .55, evals[0], evecs0)
S2 = single_tensor(gtab, .45, evals[1], evecs1)
return S1 + S2, gtab, np.vstack([a, b])
class TestQballModel(object):
model = QballModel
def test_single_voxel_fit(self):
signal, gtab, expected = make_fake_signal()
sphere = hemi_icosahedron.subdivide(4)
model = self.model(gtab, sh_order=4, min_signal=1e-5,
assume_normed=True)
fit = model.fit(signal)
odf = fit.odf(sphere)
assert_equal(odf.shape, sphere.phi.shape)
directions, _, _ = peak_directions(odf, sphere)
# Check the same number of directions
n = len(expected)
assert_equal(len(directions), n)
# Check directions are unit vectors
cos_similarity = (directions * directions).sum(-1)
assert_array_almost_equal(cos_similarity, np.ones(n))
# Check the directions == expected or -expected
cos_similarity = (directions * expected).sum(-1)
assert_array_almost_equal(abs(cos_similarity), np.ones(n))
# Test normalize data
model = self.model(gtab, sh_order=4, min_signal=1e-5,
assume_normed=False)
fit = model.fit(signal * 5)
odf_with_norm = fit.odf(sphere)
assert_array_almost_equal(odf, odf_with_norm)
def test_mulit_voxel_fit(self):
signal, gtab, expected = make_fake_signal()
sphere = hemi_icosahedron
nd_signal = np.vstack([signal, signal])
model = self.model(gtab, sh_order=4, min_signal=1e-5,
assume_normed=True)
fit = model.fit(nd_signal)
odf = fit.odf(sphere)
assert_equal(odf.shape, (2,) + sphere.phi.shape)
# Test fitting with mask, where mask is False odf should be 0
fit = model.fit(nd_signal, mask=[False, True])
odf = fit.odf(sphere)
assert_array_equal(odf[0], 0.)
def test_sh_order(self):
signal, gtab, expected = make_fake_signal()
model = self.model(gtab, sh_order=4, min_signal=1e-5)
assert_equal(model.B.shape[1], 15)
assert_equal(max(model.n), 4)
model = self.model(gtab, sh_order=6, min_signal=1e-5)
assert_equal(model.B.shape[1], 28)
assert_equal(max(model.n), 6)
def test_gfa(self):
signal, gtab, expected = make_fake_signal()
signal = np.ones((2, 3, 4, 1)) * signal
sphere = hemi_icosahedron.subdivide(3)
model = self.model(gtab, 6, min_signal=1e-5)
fit = model.fit(signal)
gfa_shm = fit.gfa
gfa_odf = odf.gfa(fit.odf(sphere))
assert_array_almost_equal(gfa_shm, gfa_odf, 3)
def test_SphHarmFit():
coef = np.zeros((3, 4, 5, 45))
mask = np.zeros((3, 4, 5), dtype=bool)
fit = SphHarmFit(None, coef, mask)
item = fit[0, 0, 0]
assert_equal(item.shape, ())
slice = fit[0]
assert_equal(slice.shape, (4, 5))
slice = fit[..., 0]
assert_equal(slice.shape, (3, 4))
class TestOpdtModel(TestQballModel):
model = OpdtModel
class TestCsaOdfModel(TestQballModel):
model = CsaOdfModel
def test_hat_and_lcr():
hemi = hemi_icosahedron.subdivide(3)
m, n = sph_harm_ind_list(8)
B = real_sph_harm(m, n, hemi.theta[:, None], hemi.phi[:, None])
H = hat(B)
B_hat = np.dot(H, B)
assert_array_almost_equal(B, B_hat)
R = lcr_matrix(H)
d = np.arange(len(hemi.theta))
r = d - np.dot(H, d)
lev = np.sqrt(1 - H.diagonal())
r /= lev
r -= r.mean()
r2 = np.dot(R, d)
assert_array_almost_equal(r, r2)
r3 = np.dot(d, R.T)
assert_array_almost_equal(r, r3)
def test_bootstrap_array():
B = np.array([[4, 5, 7, 4, 2.],
[4, 6, 2, 3, 6.]])
H = hat(B.T)
R = np.zeros((5, 5))
d = np.arange(1, 6)
dhat = np.dot(H, d)
assert_array_almost_equal(bootstrap_data_voxel(dhat, H, R), dhat)
assert_array_almost_equal(bootstrap_data_array(dhat, H, R), dhat)
H = np.zeros((5, 5))
def test_ResidualBootstrapWrapper():
B = np.array([[4, 5, 7, 4, 2.],
[4, 6, 2, 3, 6.]])
B = B.T
H = hat(B)
d = np.arange(10) / 8.
d.shape = (2, 5)
dhat = np.dot(d, H)
signal_object = NearestNeighborInterpolator(dhat, (1,))
ms = .2
where_dwi = np.ones(len(H), dtype=bool)
boot_obj = ResidualBootstrapWrapper(signal_object, B, where_dwi, ms)
assert_array_almost_equal(boot_obj[0], dhat[0].clip(ms, 1))
assert_array_almost_equal(boot_obj[1], dhat[1].clip(ms, 1))
dhat = np.column_stack([[.6, .7], dhat])
signal_object = NearestNeighborInterpolator(dhat, (1,))
where_dwi = np.concatenate([[False], where_dwi])
boot_obj = ResidualBootstrapWrapper(signal_object, B, where_dwi, ms)
assert_array_almost_equal(boot_obj[0], dhat[0].clip(ms, 1))
assert_array_almost_equal(boot_obj[1], dhat[1].clip(ms, 1))
def test_sf_to_sh():
# Subdividing a hemi_icosahedron twice produces 81 unique points, which
# is more than enough to fit a order 8 (45 coefficients) spherical harmonic
sphere = hemi_icosahedron.subdivide(2)
mevals = np.array(([0.0015, 0.0003, 0.0003], [0.0015, 0.0003, 0.0003]))
angles = [(0, 0), (90, 0)]
odf = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50])
# 1D case with the 3 bases functions
odf_sh = sf_to_sh(odf, sphere, 8)
odf2 = sh_to_sf(odf_sh, sphere, 8)
assert_array_almost_equal(odf, odf2, 2)
odf_sh = sf_to_sh(odf, sphere, 8, "mrtrix")
odf2 = sh_to_sf(odf_sh, sphere, 8, "mrtrix")
assert_array_almost_equal(odf, odf2, 2)
odf_sh = sf_to_sh(odf, sphere, 8, "fibernav")
odf2 = sh_to_sf(odf_sh, sphere, 8, "fibernav")
assert_array_almost_equal(odf, odf2, 2)
# 2D case
odf2d = np.vstack((odf2, odf))
odf2d_sh = sf_to_sh(odf2d, sphere, 8)
odf2d_sf = sh_to_sf(odf2d_sh, sphere, 8)
assert_array_almost_equal(odf2d, odf2d_sf, 2)
if __name__ == "__main__":
import nose
nose.runmodule()
| 34.847411 | 79 | 0.591837 |
4a270a9bfcba898e87481910c36d9638e49bff72 | 1,473 | py | Python | backend/users/models.py | JoaoAPS/AlugaInstrumentos | f4001f439c4f96c4de2b194ce268b9d7f95e4512 | [
"MIT"
] | null | null | null | backend/users/models.py | JoaoAPS/AlugaInstrumentos | f4001f439c4f96c4de2b194ce268b9d7f95e4512 | [
"MIT"
] | null | null | null | backend/users/models.py | JoaoAPS/AlugaInstrumentos | f4001f439c4f96c4de2b194ce268b9d7f95e4512 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email: str, name: str, password: str):
"""Create and save a new user"""
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email: str, name: str, password: str):
"""Create and save a new superuser"""
user = self.create_user(email=email, name=name, password=password)
user.is_admin = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
def __str__(self):
return self.email
@property
def is_staff(self):
"""Is the user a member of staff?"""
return self.is_admin
def has_perm(self, perm, obj=None):
"Does the user have a specific permission?"
return True
def has_module_perms(self, app_label):
"Does the user have permissions to view the app `app_label`?"
return True
| 28.882353 | 74 | 0.674134 |
4a270b7d5568667b39eb1ed26f200856cfa041b8 | 6,157 | py | Python | python/snapy/netsnmp/unittests/__init__.py | marineam/nagcat | 445d0efe1fb2ec93c31d1f9d8fa0c0563189ffaf | [
"Apache-2.0"
] | null | null | null | python/snapy/netsnmp/unittests/__init__.py | marineam/nagcat | 445d0efe1fb2ec93c31d1f9d8fa0c0563189ffaf | [
"Apache-2.0"
] | null | null | null | python/snapy/netsnmp/unittests/__init__.py | marineam/nagcat | 445d0efe1fb2ec93c31d1f9d8fa0c0563189ffaf | [
"Apache-2.0"
] | null | null | null | # snapy - a python snmp library
#
# Copyright (C) 2009 ITA Software, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import os
import socket
import signal
import warnings
import twisted
from twisted.internet import defer, error, process, protocol, reactor
from twisted.python import log, versions
from twisted.trial import unittest
def pick_a_port():
# XXX: Not perfect, there is a race condition between
# the close and snmpd's bind. However the other way
# would be to hook into snmpd's bind() call...
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', 0))
host, port = sock.getsockname()
sock.close()
return port
class LoggingProtocol(protocol.ProcessProtocol):
"""Log snmpd output via the twisted logging api"""
def __init__(self, factory):
self.factory = factory
def outReceived(self, data):
self.factory.stdout += data
for line in data.splitlines():
log.msg("snmpd: %s" % line)
def errReceived(self, data):
self.factory.stderr += data
for line in data.splitlines():
if line.startswith("NET-SNMP"):
self.factory.started()
log.err("snmpd: %s" % line)
def processEnded(self, status):
if isinstance(status.value, error.ProcessDone):
log.msg("snmpd: exit(0)")
self.factory.done(None)
elif isinstance(status.value, error.ProcessTerminated):
log.err("snmpd: exit(%s)" % status.value.exitCode)
self.factory.done(status)
else:
log.err("snmpd: %s" % status)
self.factory.done(status)
class Server(process.Process):
"""Run snmpd"""
# Limit snmpd to only load these modules, this speeds things up
modules = ('override', 'hr_system', 'system_mib')
def __init__(self):
self._deferred = defer.Deferred()
self._address = defer.Deferred()
self._timeout = None
self.conf = "%s/snmpd.conf" % os.path.dirname(__file__)
self.socket = "udp:127.0.0.1:%d" % pick_a_port()
self.stdout = ""
self.stderr = ""
proto = LoggingProtocol(self)
env = {"PATH": "/bin:/sbin:/usr/bin:/usr/sbin"}
cmd = ("snmpd", "-f", "-C", "-c", self.conf,
"-LE7", "-Ddumpv_recv", "-Ddumph_recv",
"-I", ','.join(self.modules),
"--noPersistentLoad=1", "--noPersistentSave=1",
self.socket)
# Skip test if snmpd doesn't exist
found = False
for path in env['PATH'].split(':'):
if os.path.exists("%s/%s" % (path, cmd[0])):
found = True
break
if not found:
raise unittest.SkipTest("snmpd missing")
super(Server, self).__init__(reactor, cmd[0], cmd, env, None, proto)
def started(self):
log.msg("Ready, snmpd listening on %s" % self.socket)
self._address.callback(self.socket)
def address(self):
return self._address
def stop(self):
assert self.pid and self._deferred
log.msg("Stopping snmpd...")
os.kill(self.pid, signal.SIGTERM)
self._timeout = reactor.callLater(5.0, self.timeout)
return self._deferred
def timeout(self):
assert self.pid
log.msg("Timeout, Killing snmpd...")
os.kill(self.pid, signal.SIGKILL)
self._timeout = None
def done(self, status):
assert self._deferred
if not self._address.called:
self._address.errback(Exception("snmpd failed"))
if self._timeout:
self._timeout.cancel()
self._timeout = None
self._deferred.callback(status)
self._deferred = None
class TestCase(unittest.TestCase):
def setUp(self):
# Twisted < 10.0.0 falsely raises its zombie warning during tests
if twisted.version < versions.Version("twisted", 10, 0, 0):
warnings.simplefilter("ignore", error.PotentialZombieWarning)
self._running = False
def set_running(result):
self._running = True
self.server = Server()
d = self.server.address()
d.addCallbacks(self.setUpSession, lambda x: None)
d.addCallback(lambda x: self._set_running(True))
d.addErrback(lambda x: self.server.stop())
return d
def _set_running(self, value):
# since we can't do this in lambda
self._running = value
def setUpSession(self, address):
pass
def tearDown(self):
if not self._running:
return
try:
self.tearDownSession()
finally:
d = self.server.stop()
d.addCallback(lambda x: self._set_running(False))
return d
def tearDownSession(self):
pass
def assertVersion(self, version):
self.assertIn("\ndumph_recv: SNMPv%s message\n" % version,
self.server.stderr)
def assertCommand(self, command):
self.assertIn("\ndumpv_recv: Command %s\n" % command,
self.server.stderr)
def finish(self, commands=()):
def checks(result):
self.assertVersion(self.version)
for command in commands:
self.assertCommand(command)
d = self.tearDown()
d.addCallback(checks)
return d
def finishGet(self):
return self.finish(["GET"])
def finishWalk(self):
if self.bulk:
return self.finish(["GET","GETBULK"])
else:
return self.finish(["GET","GETNEXT"])
def finishStrictWalk(self):
if self.bulk:
return self.finish(["GETBULK"])
else:
return self.finish(["GETNEXT"])
| 30.631841 | 76 | 0.600455 |
4a270d52ed62b8be666ae6536eb09180df7981dd | 1,949 | py | Python | ortools/sat/samples/optional_interval_sample_sat.py | AlohaChina/or-tools | 1ece0518104db435593a1a21882801ab6ada3e15 | [
"Apache-2.0"
] | 8,273 | 2015-02-24T22:10:50.000Z | 2022-03-31T21:19:27.000Z | ortools/sat/samples/optional_interval_sample_sat.py | AlohaChina/or-tools | 1ece0518104db435593a1a21882801ab6ada3e15 | [
"Apache-2.0"
] | 2,530 | 2015-03-05T04:27:21.000Z | 2022-03-31T06:13:02.000Z | ortools/sat/samples/optional_interval_sample_sat.py | AlohaChina/or-tools | 1ece0518104db435593a1a21882801ab6ada3e15 | [
"Apache-2.0"
] | 2,057 | 2015-03-04T15:02:02.000Z | 2022-03-30T02:29:27.000Z | #!/usr/bin/env python3
# Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code sample to demonstrates how to build an optional interval."""
from ortools.sat.python import cp_model
def OptionalIntervalSampleSat():
"""Showcases how to build optional interval variables."""
model = cp_model.CpModel()
horizon = 100
# An interval can be created from three affine expressions.
start_var = model.NewIntVar(0, horizon, 'start')
duration = 10  # Python cp/sat code accepts integer variables or constants.
end_var = model.NewIntVar(0, horizon, 'end')
presence_var = model.NewBoolVar('presence')
interval_var = model.NewOptionalIntervalVar(start_var, duration,
end_var + 2, presence_var,
'interval')
print(f'interval = {repr(interval_var)}')
# If the size is fixed, a simpler version uses the start expression and the
# size.
fixed_size_interval_var = model.NewOptionalFixedSizeIntervalVar(
start_var, 10, presence_var, 'fixed_size_interval_var')
print(f'fixed_size_interval_var = {repr(fixed_size_interval_var)}')
# A fixed interval can be created using the same API.
fixed_interval = model.NewOptionalFixedSizeIntervalVar(
5, 10, presence_var, 'fixed_interval')
print(f'fixed_interval = {repr(fixed_interval)}')
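    # Optional intervals are typically consumed by scheduling constraints; for
    # example (illustrative only, not executed in this sample) they could be
    # passed to model.AddNoOverlap([interval_var, fixed_size_interval_var]).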
OptionalIntervalSampleSat()
| 40.604167 | 79 | 0.706003 |
4a270d687783e58ea4d0778236cfdbe76a9b5136 | 911 | py | Python | DPGAnalysis/Skims/python/EcalTangentSkim_cfg.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | DPGAnalysis/Skims/python/EcalTangentSkim_cfg.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | DPGAnalysis/Skims/python/EcalTangentSkim_cfg.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | import FWCore.ParameterSet.Config as cms
process = cms.Process("NonTkPointSkim")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger = cms.Service(
"MessageLogger",
destinations = cms.untracked.vstring('output.txt')
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'file:scratch/867948E3-6CC4-DD11-9DB5-0019B9E7CD78.root'
)
)
process.filter = cms.EDFilter('EcalTangentFilter',
MuLabel = cms.string("muonsBarrelOnly"),
MuD0Min = cms.double(129),
MuD0Max = cms.double(152),
Verbose = cms.bool(False)
)
process.p1 = cms.Path(process.filter)
process.out = cms.OutputModule("PoolOutputModule",
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('p1')
),
fileName = cms.untracked.string('EcalTangentSkim.root')
)
process.o = cms.EndPath(process.out)
| 24.621622 | 73 | 0.749726 |
4a270dc17c0b5ca52d9adf06bc47434a839c7703 | 6,470 | py | Python | python_2dcfd/latticeBoltzmanDemo.py | matthewa313/2D-CFD | af5f0f81ae1ba55993eb331f08480927875a6d1a | [
"MIT"
] | 2 | 2021-05-06T00:04:40.000Z | 2021-05-06T08:02:06.000Z | python_2dcfd/latticeBoltzmanDemo.py | matthewa313/2d-cfd | af5f0f81ae1ba55993eb331f08480927875a6d1a | [
"MIT"
] | 1 | 2021-10-21T04:30:24.000Z | 2022-01-05T07:43:00.000Z | python_2dcfd/latticeBoltzmanDemo.py | matthewa313/2D-CFD | af5f0f81ae1ba55993eb331f08480927875a6d1a | [
"MIT"
] | null | null | null | # LatticeBoltzmannDemo.py: a two-dimensional lattice-Boltzmann "wind tunnel" simulation
# Uses numpy to speed up all array handling.
# Uses matplotlib to plot and animate the curl of the macroscopic velocity field.
# TODO: make interactive
import numpy
import time
import matplotlib.pyplot
import matplotlib.animation
# Define constants:
height = 80 # lattice dimensions
width = 200
viscosity = 0.02 # fluid viscosity
omega = 1 / (3 * viscosity + 0.5) # "relaxation" parameter
u0 = 0.1 # initial and in-flow speed
FOUR_NINTHS = 4.0 / 9.0 # abbreviations for lattice-Boltzmann weight factors
ONE_NINTH = 1.0 / 9.0
ONE_THIRTYSIXTH = 1.0 / 36.0
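# 4/9, 1/9 and 1/36 are the standard D2Q9 lattice weights for the rest particle,
# the four axis-aligned directions and the four diagonal directions, respectively.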
# precompute a repeated term
u02 = u0 * u0
# Initialize all the arrays to steady rightward flow:
# particle densities along 9 directions
n0 = FOUR_NINTHS * (numpy.ones((height, width)) - 1.5 * u02)
nN = ONE_NINTH * (numpy.ones((height, width)) - 1.5 * u02)
nS = ONE_NINTH * (numpy.ones((height, width)) - 1.5 * u02)
nE = ONE_NINTH * (numpy.ones((height, width)) + 3 * u0 + 4.5 * u02 - 1.5 * u02)
nW = ONE_NINTH * (numpy.ones((height, width)) - 3 * u0 + 4.5 * u02 - 1.5 * u02)
nNE = ONE_THIRTYSIXTH * (numpy.ones((height, width)) + 3 * u0 + 4.5 * u02 - 1.5 * u02)
nSE = ONE_THIRTYSIXTH * (numpy.ones((height, width)) + 3 * u0 + 4.5 * u02 - 1.5 * u02)
nNW = ONE_THIRTYSIXTH * (numpy.ones((height, width)) - 3 * u0 + 4.5 * u02 - 1.5 * u02)
nSW = ONE_THIRTYSIXTH * (numpy.ones((height, width)) - 3 * u0 + 4.5 * u02 - 1.5 * u02)
rho = n0 + nN + nS + nE + nW + nNE + nSE + nNW + nSW # macroscopic density
ux = (nE + nNE + nSE - nW - nNW - nSW) / rho # macroscopic x velocity
uy = (nN + nNE + nNW - nS - nSE - nSW) / rho # macroscopic y velocity
# Initialize barriers:
# True wherever there's a barrier
barrier = numpy.zeros((height, width), bool)
barrier[40][32:48] = True # simple linear barrier
barrierN = numpy.roll(barrier, 1, axis=0) # sites just north of barriers
barrierS = numpy.roll(barrier, -1, axis=0) # sites just south of barriers
barrierE = numpy.roll(barrier, 1, axis=1) # etc.
barrierW = numpy.roll(barrier, -1, axis=1)
barrierNE = numpy.roll(barrierN, 1, axis=1)
barrierNW = numpy.roll(barrierN, -1, axis=1)
barrierSE = numpy.roll(barrierS, 1, axis=1)
barrierSW = numpy.roll(barrierS, -1, axis=1)
# Move all particles by one step along their directions of motion (pbc):
def stream():
global nN, nS, nE, nW, nNE, nNW, nSE, nSW
# axis 0 is north-south; + direction is north
nN = numpy.roll(nN, 1, axis=0)
nNE = numpy.roll(nNE, 1, axis=0)
nNW = numpy.roll(nNW, 1, axis=0)
nS = numpy.roll(nS, -1, axis=0)
nSE = numpy.roll(nSE, -1, axis=0)
nSW = numpy.roll(nSW, -1, axis=0)
# axis 1 is east-west; + direction is east
nE = numpy.roll(nE, 1, axis=1)
nNE = numpy.roll(nNE, 1, axis=1)
nSE = numpy.roll(nSE, 1, axis=1)
nW = numpy.roll(nW, -1, axis=1)
nNW = numpy.roll(nNW, -1, axis=1)
nSW = numpy.roll(nSW, -1, axis=1)
# Use tricky boolean arrays to handle barrier collisions (bounce-back):
nN[barrierN] = nS[barrier]
nS[barrierS] = nN[barrier]
nE[barrierE] = nW[barrier]
nW[barrierW] = nE[barrier]
nNE[barrierNE] = nSW[barrier]
nNW[barrierNW] = nSE[barrier]
nSE[barrierSE] = nNW[barrier]
nSW[barrierSW] = nNE[barrier]
# Collide particles within each cell to redistribute velocities (could be optimized a little more):
def collide():
global rho, ux, uy, n0, nN, nS, nE, nW, nNE, nNW, nSE, nSW
rho = n0 + nN + nS + nE + nW + nNE + nSE + nNW + nSW
ux = (nE + nNE + nSE - nW - nNW - nSW) / rho
uy = (nN + nNE + nNW - nS - nSE - nSW) / rho
ux2 = ux * ux # pre-compute terms used repeatedly...
uy2 = uy * uy
u2 = ux2 + uy2
#u02 = u0 * u0
omu215 = 1 - 1.5 * u2 # "one minus u2 times 1.5"
uxuy = ux * uy
coeff = omega * rho
n0 = (1 - omega) * n0 + coeff * FOUR_NINTHS * omu215
nN = (1 - omega) * nN + coeff * ONE_NINTH * (omu215 + 3 * uy + 4.5 * uy2)
nS = (1 - omega) * nS + coeff * ONE_NINTH * (omu215 - 3 * uy + 4.5 * uy2)
nE = (1 - omega) * nE + coeff * ONE_NINTH * (omu215 + 3 * ux + 4.5 * ux2)
nW = (1 - omega) * nW + coeff * ONE_NINTH * (omu215 - 3 * ux + 4.5 * ux2)
nNE = (1 - omega) * nNE + coeff * ONE_THIRTYSIXTH * (omu215 + 3 * (ux + uy) + 4.5 * (u2 + 2 * uxuy))
nNW = (1 - omega) * nNW + coeff * ONE_THIRTYSIXTH * (omu215 + 3 * (-ux + uy) + 4.5 * (u2 - 2 * uxuy))
nSE = (1 - omega) * nSE + coeff * ONE_THIRTYSIXTH * (omu215 + 3 * (ux - uy) + 4.5 * (u2 - 2 * uxuy))
nSW = (1 - omega) * nSW + coeff * ONE_THIRTYSIXTH * (omu215 + 3 * (-ux - uy) + 4.5 * (u2 + 2 * uxuy))
# Force steady rightward flow at ends (no need to set 0, N, and S components):
nE[:, 0] = ONE_NINTH * (1 + 3 * u0 + 4.5 * u02 - 1.5 * u02)
nW[:, 0] = ONE_NINTH * (1 - 3 * u0 + 4.5 * u02 - 1.5 * u02)
nNE[:, 0] = ONE_THIRTYSIXTH * (1 + 3 * u0 + 4.5 * u02 - 1.5 * u02)
nSE[:, 0] = ONE_THIRTYSIXTH * (1 + 3 * u0 + 4.5 * u02 - 1.5 * u02)
nNW[:, 0] = ONE_THIRTYSIXTH * (1 - 3 * u0 + 4.5 * u02 - 1.5 * u02)
nSW[:, 0] = ONE_THIRTYSIXTH * (1 - 3 * u0 + 4.5 * u02 - 1.5 * u02)
# Compute curl of the macroscopic velocity field:
def curl(ux, uy):
return (numpy.roll(uy, -1, axis=1) - numpy.roll(uy, 1, axis=1) - numpy.roll(ux, -1, axis=0) + numpy.roll(ux, 1, axis=0))
# Here comes the graphics and animation...
theFig = matplotlib.pyplot.figure(figsize=(8, 3))
fluidImage = matplotlib.pyplot.imshow(curl(ux, uy), origin='lower', norm=matplotlib.pyplot.Normalize(-.1, .1), cmap=matplotlib.pyplot.get_cmap('jet'), interpolation='none')
# See http://www.loria.fr/~rougier/teaching/matplotlib/#colormaps for other cmap options
bImageArray = numpy.zeros((height, width, 4), numpy.uint8) # an RGBA image
bImageArray[barrier, 3] = 255 # set alpha=255 only at barrier sites
barrierImage = matplotlib.pyplot.imshow(
bImageArray, origin='lower', interpolation='none')
# Function called for each successive animation frame:
startTime = time.perf_counter()
def nextFrame(arg): # (arg is the frame number, which we don't need)
global startTime
for step in range(20): # adjust number of steps for smooth animation
stream()
collide()
fluidImage.set_array(curl(ux, uy))
return (fluidImage, barrierImage) # return the figure elements to redraw
animate = matplotlib.animation.FuncAnimation(theFig, nextFrame, interval=1, blit=True)
matplotlib.pyplot.show()
| 48.283582 | 172 | 0.622566 |
4a270ef941f0fc1aa7ec9e3a6222169e0ccb42a2 | 996 | py | Python | Indicators/AccumulationNDistributionLine.py | puchchi/stock_scraper_latest | 09abfa12edbec0d6a65915db37ad3ad1b25fa092 | [
"MIT"
] | null | null | null | Indicators/AccumulationNDistributionLine.py | puchchi/stock_scraper_latest | 09abfa12edbec0d6a65915db37ad3ad1b25fa092 | [
"MIT"
] | null | null | null | Indicators/AccumulationNDistributionLine.py | puchchi/stock_scraper_latest | 09abfa12edbec0d6a65915db37ad3ad1b25fa092 | [
"MIT"
] | null | null | null | import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
import pandas as pd
class kAccumulationNDistributionLine():
def __init__(self):
#do nothing
print "In kADL class"
def calculate(self, dataFrame):
##### FORMULA #####
#1. Money Flow Multiplier = [(Close - Low) - (High - Close)] /(High - Low)
#2. Money Flow Volume = Money Flow Multiplier x Volume for the Period
#3. ADL = Previous ADL + Current Period's Money Flow Volume
moneyFlowMul = ((dataFrame['Close'] - dataFrame['Low']) - (dataFrame['High'] - dataFrame['Close'])) / (dataFrame['High'] - dataFrame['Low'])
moneyFlowVolume = moneyFlowMul * dataFrame['Volume']
previousADL = pd.Series(moneyFlowVolume[1:], index = moneyFlowVolume.index)
# We need to fill 0th entry with 0
previousADL = previousADL.fillna(0)
ADL = previousADL + moneyFlowVolume
return ADL
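# Illustrative usage (assumes `ohlcv` is a pandas DataFrame with 'High', 'Low',
# 'Close' and 'Volume' columns):
#     adl = kAccumulationNDistributionLine().calculate(ohlcv)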
| 34.344828 | 148 | 0.630522 |
4a270f452faf9a6f15eef8da85622d7f03cbb767 | 1,521 | py | Python | cdt/__init__.py | koutrgor/CausalDiscoveryToolbox | 0d9997294520430e24bd80f1bfb05c0f852984cb | [
"MIT"
] | null | null | null | cdt/__init__.py | koutrgor/CausalDiscoveryToolbox | 0d9997294520430e24bd80f1bfb05c0f852984cb | [
"MIT"
] | null | null | null | cdt/__init__.py | koutrgor/CausalDiscoveryToolbox | 0d9997294520430e24bd80f1bfb05c0f852984cb | [
"MIT"
] | null | null | null | """The Causal Discovery Toolbox contains various methods for graph structure recovery and causal inference.
It is CUDA-compatible for the most computationally expensive algorithms.
.. MIT License
..
.. Copyright (c) 2018 Diviyan Kalainathan
..
.. Permission is hereby granted, free of charge, to any person obtaining a copy
.. of this software and associated documentation files (the "Software"), to deal
.. in the Software without restriction, including without limitation the rights
.. to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
.. copies of the Software, and to permit persons to whom the Software is
.. furnished to do so, subject to the following conditions:
..
.. The above copyright notice and this permission notice shall be included in all
.. copies or substantial portions of the Software.
..
.. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
.. IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
.. FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
.. AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
.. LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
.. OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
.. SOFTWARE.
"""
import cdt.causality
import cdt.independence
import cdt.generators
from cdt.utils import loss
from cdt.utils.Settings import SETTINGS
from cdt.utils import metrics
from cdt.utils.R import RPackages
| 43.457143 | 107 | 0.780408 |
4a270f52d7a58c6a92ac0508ffb7f034043f8aba | 1,607 | py | Python | setup.py | wppply/s4l | aedea4f73ddcae23d8d20d1c4881987c2da2f9d3 | [
"Apache-2.0"
] | 94 | 2019-10-23T13:30:48.000Z | 2022-01-26T17:44:39.000Z | setup.py | wppply/s4l | aedea4f73ddcae23d8d20d1c4881987c2da2f9d3 | [
"Apache-2.0"
] | 4 | 2019-12-16T01:54:32.000Z | 2021-05-17T07:02:35.000Z | setup.py | wppply/s4l | aedea4f73ddcae23d8d20d1c4881987c2da2f9d3 | [
"Apache-2.0"
] | 24 | 2019-10-30T01:28:42.000Z | 2022-03-12T07:51:10.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install s4l package."""
from setuptools import find_packages
from setuptools import setup
setup(
name='s4l',
version='1.0',
description='Code from the "S4L: Self-supervised Semi-supervised Learning" paper',
author='Google LLC',
author_email='[email protected]',
url='http://github.com/google-research/s4l',
license='Apache 2.0',
packages=find_packages(),
package_data={
},
scripts=[
],
install_requires=[
'future',
'numpy',
'absl-py',
'tensorflow',
'tensorflow-hub',
# For Google Cloud TPUs
'google-api-python-client',
'oauth2client',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords='tensorflow self supervised semi supervised s4l learning',
)
| 30.320755 | 86 | 0.667082 |
4a271084e7581548b9e8d18902cb0b0c514ef1fa | 4,188 | py | Python | examples/graph_based_clustering_demo/train.py | zbmain/PGL | dbded6a1543248b0a33c05eb476ddc513401a774 | [
"Apache-2.0"
] | 1 | 2022-03-25T12:04:51.000Z | 2022-03-25T12:04:51.000Z | examples/graph_based_clustering_demo/train.py | zbmain/PGL | dbded6a1543248b0a33c05eb476ddc513401a774 | [
"Apache-2.0"
] | null | null | null | examples/graph_based_clustering_demo/train.py | zbmain/PGL | dbded6a1543248b0a33c05eb476ddc513401a774 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import argparse
import pgl
import paddle
import paddle.nn as nn
from pgl.utils.logger import log
import numpy as np
import yaml
from easydict import EasyDict as edict
import tqdm
from paddle.optimizer import Adam
from pgl.utils.data import Dataloader
from paddle.io import get_worker_info
from model import SkipGramModel
from dataset import ShardedDataset
from dataset import BatchNode2vecWalk
def save_embedding(model, save_dir):
save_files = os.path.join(save_dir, "embedding.txt")
print(model.state_dict().keys())
embed = model.state_dict()['emb.weight'].numpy()
with open(save_files, "w") as writer:
for idx, vec in enumerate(embed):
str_vec = ' '.join(map(str, vec))
writer.write("%s\t%s\n" % (idx, str_vec))
def load_from_file(path):
edges = []
with open(path) as inf:
for line in inf:
u, t = line.strip("\n").split("\t")
u, t = int(u), int(t)
edges.append((u, t))
edges = np.array(edges)
graph = pgl.Graph(edges)
return graph
def train(model, data_loader, optim, log_per_step=10):
model.train()
total_loss = 0.
total_sample = 0
for batch, (src, dsts) in enumerate(data_loader):
num_samples = len(src)
src = paddle.to_tensor(src)
dsts = paddle.to_tensor(dsts)
loss = model(src, dsts)
loss.backward()
optim.step()
optim.clear_grad()
total_loss += loss.numpy()[0] * num_samples
total_sample += num_samples
if batch % log_per_step == 0:
log.info("Batch %s %s-Loss %.6f" %
(batch, "train", loss.numpy()[0]))
return total_loss / total_sample
def main(args):
if not args.use_cuda:
paddle.set_device("cpu")
if args.edge_file:
graph = load_from_file(args.edge_file)
else:
ds = pgl.dataset.CoraDataset()
graph = ds.graph
model = SkipGramModel(
graph.num_nodes,
args.embed_size,
args.neg_num,
sparse=not args.use_cuda)
model = paddle.DataParallel(model)
train_ds = ShardedDataset(graph.nodes, repeat=args.epoch)
train_steps = int(len(train_ds) // args.batch_size)
log.info("train_steps: %s" % train_steps)
scheduler = paddle.optimizer.lr.PolynomialDecay(
learning_rate=args.learning_rate,
decay_steps=train_steps,
end_lr=0.0001)
optim = Adam(learning_rate=scheduler, parameters=model.parameters())
collate_fn = BatchNode2vecWalk(graph, args.walk_len, args.win_size,
args.neg_num, args.neg_sample_type, args.p,
args.q)
data_loader = Dataloader(
train_ds,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.sample_workers,
collate_fn=collate_fn)
train_loss = train(model, data_loader, optim)
# paddle.save(model.state_dict(), "model.pdparams")
save_embedding(model, "./")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Deepwalk')
parser.add_argument("--use_cuda", action='store_true', help="use_cuda")
parser.add_argument("--conf", type=str, default="./config.yaml")
parser.add_argument("--epoch", type=int, default=40, help="Epoch")
parser.add_argument("--edge_file", type=str, default=None)
args = parser.parse_args()
# merge user args and config file
config = edict(yaml.load(open(args.conf), Loader=yaml.FullLoader))
config.update(vars(args))
main(config)
| 30.794118 | 78 | 0.658309 |
4a2714409960c2062160031b58dba4564e0d115c | 829 | py | Python | tests/test_memorize_util.py | ridi/django-shard-library | 405e1c213420e095f776d8d2969a147bb0793d9c | [
"BSD-3-Clause"
] | 17 | 2018-03-12T11:37:14.000Z | 2021-12-09T15:30:52.000Z | tests/test_memorize_util.py | ridi/django-shard-library | 405e1c213420e095f776d8d2969a147bb0793d9c | [
"BSD-3-Clause"
] | 12 | 2018-03-12T10:39:39.000Z | 2018-08-21T03:26:09.000Z | tests/test_memorize_util.py | ridi/django-shard-library | 405e1c213420e095f776d8d2969a147bb0793d9c | [
"BSD-3-Clause"
] | 3 | 2018-03-12T10:32:11.000Z | 2021-04-02T06:24:14.000Z | from shard.utils.memorize import memorize
from tests.base import BaseTestCase
def dummy_func(value):
return object()
class MemorizeUtilTestCase(BaseTestCase):
def setUp(self):
self.func = memorize(dummy_func)
def test_memorize(self):
obj = self.func(value=1)
obj2 = self.func(value=1)
self.assertEqual(id(obj), id(obj2))
def test_memorize_another_key(self):
obj = self.func(value=1)
obj2 = self.func(value=1)
obj3 = self.func(value=2)
self.assertEqual(id(obj), id(obj2))
self.assertNotEqual(id(obj), id(obj3))
self.assertNotEqual(id(obj2), id(obj3))
def test_memorize_clear(self):
obj = self.func(value=1, clear=True)
obj2 = self.func(value=1, clear=True)
self.assertNotEqual(id(obj), id(obj2))
| 25.121212 | 47 | 0.640531 |
4a2714a8239a3d55335bcaeafef9166e0ae72d95 | 14,440 | py | Python | .venv/lib/python3.8/site-packages/cleo/commands/completions_command.py | RivtLib/replit01 | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | [
"MIT"
] | 1 | 2020-08-07T16:09:57.000Z | 2020-08-07T16:09:57.000Z | .venv/lib/python3.8/site-packages/cleo/commands/completions_command.py | RivtLib/replit01 | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | [
"MIT"
] | null | null | null | .venv/lib/python3.8/site-packages/cleo/commands/completions_command.py | RivtLib/replit01 | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import hashlib
import posixpath
import re
import subprocess
from .._compat import encode
from ..helpers import argument
from ..helpers import option
from .command import Command
from .completions.templates import TEMPLATES
class CompletionsCommand(Command):
name = "completions"
description = "Generate completion scripts for your shell."
arguments = [
argument("shell", "The shell to generate the scripts for.", optional=True)
]
options = [
option(
"alias", None, "Alias for the current command.", flag=False, multiple=True
)
]
SUPPORTED_SHELLS = ("bash", "zsh", "fish")
hidden = True
help = """
One can generate a completion script for `<options=bold>{script_name}</>` that is compatible with \
a given shell. The script is output on `<options=bold>stdout</>` allowing one to re-direct \
the output to the file of their choosing. Where you place the file will \
depend on which shell, and which operating system you are using. Your \
particular configuration may also determine where these scripts need \
to be placed.
Here are some common set ups for the three supported shells under \
Unix and similar operating systems (such as GNU/Linux).
<options=bold>BASH</>:
Completion files are commonly stored in `<options=bold>/etc/bash_completion.d/</>`
Run the command:
`<options=bold>{script_name} {command_name} bash > /etc/bash_completion.d/{script_name}.bash-completion</>`
This installs the completion script. You may have to log out and log \
back in to your shell session for the changes to take effect.
<options=bold>FISH</>:
Fish completion files are commonly stored in\
`<options=bold>$HOME/.config/fish/completions</>`
Run the command:
`<options=bold>{script_name} {command_name} fish > ~/.config/fish/completions/{script_name}.fish</>`
This installs the completion script. You may have to log out and log \
back in to your shell session for the changes to take effect.
<options=bold>ZSH</>:
ZSH completions are commonly stored in any directory listed in your \
`<options=bold>$fpath</>` variable. To use these completions, you must either add the \
generated script to one of those directories, or add your own \
to this list.
Adding a custom directory is often the safest bet if you're unsure \
of which directory to use. First create the directory, for this \
example we'll create a hidden directory inside our `<options=bold>$HOME</>` directory
`<options=bold>mkdir ~/.zfunc</>`
Then add the following lines to your `<options=bold>.zshrc</>` just before `<options=bold>compinit</>`
`<options=bold>fpath+=~/.zfunc</>`
Now you can install the completions script using the following command
`<options=bold>{script_name} {command_name} zsh > ~/.zfunc/_{script_name}</>`
You must then either log out and log back in, or simply run
`<options=bold>exec zsh</>`
For the new completions to take effect.
<options=bold>CUSTOM LOCATIONS</>:
Alternatively, you could save these files to the place of your choosing, \
such as a custom directory inside your $HOME. Doing so will require you \
to add the proper directives, such as `source`ing inside your login \
script. Consult your shells documentation for how to add such directives.
"""
def handle(self): # type: () -> int
shell = self.argument("shell")
if not shell:
shell = self.get_shell_type()
if shell not in self.SUPPORTED_SHELLS:
raise ValueError(
"[shell] argument must be one of {}".format(
", ".join(self.SUPPORTED_SHELLS)
)
)
self.line(self.render(shell))
return 0
def render(self, shell): # type: (str) -> str
return getattr(self, "render_{}".format(shell))()
def render_bash(self): # type: () -> str
template = TEMPLATES["bash"]
script_path = posixpath.realpath(self._args.script_name)
script_name = os.path.basename(script_path)
aliases = [script_name, script_path]
aliases += self.option("alias")
function = self._generate_function_name(script_name, script_path)
commands = []
global_options = set()
options_descriptions = {}
commands_options = {}
for option in self.application.config.options.values():
options_descriptions["--" + option.long_name] = self.io.remove_format(
option.description
)
global_options.add("--" + option.long_name)
for command in self.application.commands:
command_config = command.config
if not command_config.is_enabled() or command_config.is_hidden():
continue
command_options = []
commands.append(command_config.name)
options = command_config.options
for name in sorted(options.keys()):
option = options[name]
name = "--" + option.long_name
description = option.description
command_options.append(name)
options_descriptions[name] = description
commands_options[command_config.name] = command_options
compdefs = "\n".join(
[
"complete -o default -F {} {}".format(function, alias)
for alias in aliases
]
)
commands = sorted(commands)
command_list = []
for i, command in enumerate(commands):
options = set(commands_options[command]).difference(global_options)
options = sorted(options)
options = [self._zsh_describe(opt, None).strip('"') for opt in options]
desc = [
" ({})".format(command),
' opts="${{opts}} {}"'.format(" ".join(options)),
" ;;",
]
if i < len(commands) - 1:
desc.append("")
command_list.append("\n".join(desc))
output = template % {
"script_name": script_name,
"function": function,
"opts": " ".join(sorted(global_options)),
"coms": " ".join(commands),
"command_list": "\n".join(command_list),
"compdefs": compdefs,
}
return output
def render_zsh(self):
template = TEMPLATES["zsh"]
script_path = posixpath.realpath(self._args.script_name)
script_name = os.path.basename(script_path)
aliases = [script_path]
aliases += self.option("alias")
function = self._generate_function_name(script_name, script_path)
global_options = set()
commands_descriptions = []
options_descriptions = {}
commands_options_descriptions = {}
commands_options = {}
for option in self.application.config.options.values():
options_descriptions["--" + option.long_name] = self.io.remove_format(
option.description
)
global_options.add("--" + option.long_name)
for command in self.application.commands:
command_config = command.config
if not command_config.is_enabled() or command_config.is_hidden():
continue
command_options = []
commands_options_descriptions[command_config.name] = {}
command_description = self._io.remove_format(command_config.description)
commands_descriptions.append(
self._zsh_describe(command_config.name, command_description)
)
options = command_config.options
for name in sorted(options.keys()):
option = options[name]
name = "--" + option.long_name
description = self.io.remove_format(option.description)
command_options.append(name)
options_descriptions[name] = description
commands_options_descriptions[command_config.name][name] = description
commands_options[command_config.name] = command_options
compdefs = "\n".join(
["compdef {} {}".format(function, alias) for alias in aliases]
)
commands = sorted(list(commands_options.keys()))
command_list = []
for i, command in enumerate(commands):
options = set(commands_options[command]).difference(global_options)
options = sorted(options)
options = [
self._zsh_describe(opt, commands_options_descriptions[command][opt])
for opt in options
]
desc = [
" ({})".format(command),
" opts+=({})".format(" ".join(options)),
" ;;",
]
if i < len(commands) - 1:
desc.append("")
command_list.append("\n".join(desc))
opts = []
for opt in global_options:
opts.append(self._zsh_describe(opt, options_descriptions[opt]))
output = template % {
"script_name": script_name,
"function": function,
"opts": " ".join(sorted(opts)),
"coms": " ".join(sorted(commands_descriptions)),
"command_list": "\n".join(command_list),
"compdefs": compdefs,
}
return output
def render_fish(self):
template = TEMPLATES["fish"]
script_path = posixpath.realpath(self._args.script_name)
script_name = os.path.basename(script_path)
aliases = [script_name]
aliases += self.option("alias")
function = self._generate_function_name(script_name, script_path)
global_options = set()
commands_descriptions = {}
options_descriptions = {}
commands_options_descriptions = {}
commands_options = {}
for option in self.application.config.options.values():
options_descriptions["--" + option.long_name] = self.io.remove_format(
option.description
)
global_options.add("--" + option.long_name)
for command in self.application.commands:
command_config = command.config
if not command_config.is_enabled() or command_config.is_hidden():
continue
command_options = []
commands_options_descriptions[command_config.name] = {}
commands_descriptions[command_config.name] = self._io.remove_format(
command_config.description
)
options = command_config.options
for name in sorted(options.keys()):
option = options[name]
name = "--" + option.long_name
description = self._io.remove_format(option.description)
command_options.append(name)
options_descriptions[name] = description
commands_options_descriptions[command_config.name][name] = description
commands_options[command_config.name] = command_options
opts = []
for opt in sorted(global_options):
opts.append(
"complete -c {} -n '__fish{}_no_subcommand' "
"-l {} -d '{}'".format(
script_name,
function,
opt[2:],
options_descriptions[opt].replace("'", "\\'"),
)
)
cmds_names = sorted(list(commands_options.keys()))
cmds = []
cmds_opts = []
for i, cmd in enumerate(cmds_names):
cmds.append(
"complete -c {} -f -n '__fish{}_no_subcommand' "
"-a {} -d '{}'".format(
script_name,
function,
cmd,
commands_descriptions[cmd].replace("'", "\\'"),
)
)
cmds_opts += ["# {}".format(cmd)]
options = set(commands_options[cmd]).difference(global_options)
options = sorted(options)
for opt in options:
cmds_opts.append(
"complete -c {} -A -n '__fish_seen_subcommand_from {}' "
"-l {} -d '{}'".format(
script_name,
cmd,
opt[2:],
commands_options_descriptions[cmd][opt].replace("'", "\\'"),
)
)
if i < len(cmds_names) - 1:
cmds_opts.append("")
output = template % {
"script_name": script_name,
"function": function,
"cmds_names": " ".join(cmds_names),
"opts": "\n".join(opts),
"cmds": "\n".join(cmds),
"cmds_opts": "\n".join(cmds_opts),
}
return output
def get_shell_type(self):
shell = os.getenv("SHELL")
if not shell:
raise RuntimeError(
"Could not read SHELL environment variable. "
"Please specify your shell type by passing it as the first argument."
)
return os.path.basename(shell)
def _generate_function_name(self, script_name, script_path):
return "_{}_{}_complete".format(
self._sanitize_for_function_name(script_name),
hashlib.md5(encode(script_path)).hexdigest()[0:16],
)
def _sanitize_for_function_name(self, name):
name = name.replace("-", "_")
return re.sub("[^A-Za-z0-9_]+", "", name)
def _zsh_describe(self, value, description=None):
value = '"' + value.replace(":", "\\:")
if description:
description = re.sub(
r'(["\'#&;`|*?~<>^()\[\]{}$\\\x0A\xFF])', r"\\\1", description
)
value += ":{}".format(subprocess.list2cmdline([description]).strip('"'))
value += '"'
return value
| 34.879227 | 108 | 0.561219 |
4a2714db68763d08f4b47d7b6fc015f9a8b10e90 | 1,765 | py | Python | anchore_engine/analyzers/modules/20_file_list.py | dspalmer99/anchore-engine | 8c61318be6fec5d767426fa4ccd98472cc85b5cd | [
"Apache-2.0"
] | 1 | 2019-06-27T08:47:48.000Z | 2019-06-27T08:47:48.000Z | anchore_engine/analyzers/modules/20_file_list.py | dspalmer99/anchore-engine | 8c61318be6fec5d767426fa4ccd98472cc85b5cd | [
"Apache-2.0"
] | 4 | 2020-11-07T00:16:02.000Z | 2020-11-08T20:52:06.000Z | anchore_engine/analyzers/modules/20_file_list.py | dspalmer99/anchore-engine | 8c61318be6fec5d767426fa4ccd98472cc85b5cd | [
"Apache-2.0"
] | 1 | 2019-11-23T03:39:28.000Z | 2019-11-23T03:39:28.000Z | #!/usr/bin/env python3
import sys
import os
import re
import json
import subprocess
import stat
import anchore_engine.analyzers.utils
analyzer_name = "file_list"
try:
config = anchore_engine.analyzers.utils.init_analyzer_cmdline(sys.argv, analyzer_name)
except Exception as err:
print(str(err))
sys.exit(1)
imgname = config['imgid']
imgid = config['imgid_full']
outputdir = config['dirs']['outputdir']
unpackdir = config['dirs']['unpackdir']
meta = anchore_engine.analyzers.utils.get_distro_from_squashtar(os.path.join(unpackdir, "squashed.tar"), unpackdir=unpackdir)
distrodict = anchore_engine.analyzers.utils.get_distro_flavor(meta['DISTRO'], meta['DISTROVERS'], likedistro=meta['LIKEDISTRO'])
simplefiles = {}
outfiles = {}
try:
allfiles = {}
fmap = {}
if os.path.exists(unpackdir + "/anchore_allfiles.json"):
with open(unpackdir + "/anchore_allfiles.json", 'r') as FH:
allfiles = json.loads(FH.read())
else:
fmap, allfiles = anchore_engine.analyzers.utils.get_files_from_squashtar(os.path.join(unpackdir, "squashed.tar"))
with open(unpackdir + "/anchore_allfiles.json", 'w') as OFH:
OFH.write(json.dumps(allfiles))
# fileinfo
for name in list(allfiles.keys()):
outfiles[name] = json.dumps(allfiles[name])
simplefiles[name] = oct(stat.S_IMODE(allfiles[name]['mode']))
except Exception as err:
import traceback
traceback.print_exc()
raise err
if simplefiles:
ofile = os.path.join(outputdir, 'files.all')
anchore_engine.analyzers.utils.write_kvfile_fromdict(ofile, simplefiles)
if outfiles:
ofile = os.path.join(outputdir, 'files.allinfo')
anchore_engine.analyzers.utils.write_kvfile_fromdict(ofile, outfiles)
sys.exit(0)
| 28.934426 | 128 | 0.713314 |
4a27158cee0df16bbf0802a2d6d8a0d961e03890 | 844 | py | Python | ubc/imsi_fail_trans.py | Wcopython/cpython_test | d65713a72df0f0a3db7c742bf7c5efbb5ea679a4 | [
"Apache-2.0"
] | null | null | null | ubc/imsi_fail_trans.py | Wcopython/cpython_test | d65713a72df0f0a3db7c742bf7c5efbb5ea679a4 | [
"Apache-2.0"
] | null | null | null | ubc/imsi_fail_trans.py | Wcopython/cpython_test | d65713a72df0f0a3db7c742bf7c5efbb5ea679a4 | [
"Apache-2.0"
] | null | null | null | # ! python
# -*- coding: utf-8 -*-
import sqlite3
import os
class CheckFailImsi(object):
def __init__(self):
self.db_path = '/tmp/db_cache/imsiSet.db'
def check_db_exsists(self):
if os.path.exists('/tmp/db_cache'):
return True
return False
def query_imsi(self, imsi, white_table_tag=None):
if not self.check_db_exsists():
return None
db_conn = sqlite3.connect(self.db_path)
query_str = '''SELECT imsi FROM imsi_transfail WHERE imsi=%s ''' % str(imsi)
if white_table_tag:
query_str = '''SELECT imsi FROM imsi_white WHERE imsi=%s ''' % str(imsi)
with db_conn:
cur = db_conn.cursor()
cur.execute(query_str)
rs = cur.fetchall()
if rs:
return True
return None
| 27.225806 | 84 | 0.577014 |
4a2715a8b85f53121e2a252279a7b0b3ffe47326 | 6,358 | py | Python | research/object_detection/protos/eval_pb2.py | tarlen5/models | 06646932bd571f4425b21e119104a6b93b0e244c | [
"Apache-2.0"
] | null | null | null | research/object_detection/protos/eval_pb2.py | tarlen5/models | 06646932bd571f4425b21e119104a6b93b0e244c | [
"Apache-2.0"
] | null | null | null | research/object_detection/protos/eval_pb2.py | tarlen5/models | 06646932bd571f4425b21e119104a6b93b0e244c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/eval.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\"object_detection/protos/eval.proto\x12\x17object_detection.protos\"\xe9\x08\n\nEvalConfig\x12\x15\n\nbatch_size\x18\x19 \x01(\r:\x01\x31\x12\x1e\n\x12num_visualizations\x18\x01 \x01(\r:\x02\x31\x30\x12\x1e\n\x0cnum_examples\x18\x02 \x01(\r:\x04\x35\x30\x30\x30\x42\x02\x18\x01\x12\x1f\n\x12\x65val_interval_secs\x18\x03 \x01(\r:\x03\x33\x30\x30\x12\x18\n\tmax_evals\x18\x04 \x01(\r:\x01\x30\x42\x02\x18\x01\x12\x19\n\nsave_graph\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\"\n\x18visualization_export_dir\x18\x06 \x01(\t:\x00\x12\x15\n\x0b\x65val_master\x18\x07 \x01(\t:\x00\x12\x13\n\x0bmetrics_set\x18\x08 \x03(\t\x12J\n\x14parameterized_metric\x18\x1f \x03(\x0b\x32,.object_detection.protos.ParameterizedMetric\x12\x15\n\x0b\x65xport_path\x18\t \x01(\t:\x00\x12!\n\x12ignore_groundtruth\x18\n \x01(\x08:\x05\x66\x61lse\x12\"\n\x13use_moving_averages\x18\x0b \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x65val_instance_masks\x18\x0c \x01(\x08:\x05\x66\x61lse\x12 \n\x13min_score_threshold\x18\r \x01(\x02:\x03\x30.5\x12&\n\x1amax_num_boxes_to_visualize\x18\x0e \x01(\x05:\x02\x32\x30\x12\x1a\n\x0bskip_scores\x18\x0f \x01(\x08:\x05\x66\x61lse\x12\x1a\n\x0bskip_labels\x18\x10 \x01(\x08:\x05\x66\x61lse\x12*\n\x1bvisualize_groundtruth_boxes\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\x32\n#groundtruth_box_visualization_color\x18\x12 \x01(\t:\x05\x62lack\x12\x35\n&keep_image_id_for_visualization_export\x18\x13 \x01(\x08:\x05\x66\x61lse\x12$\n\x16retain_original_images\x18\x17 \x01(\x08:\x04true\x12+\n\x1cinclude_metrics_per_category\x18\x18 \x01(\x08:\x05\x66\x61lse\x12\'\n\x18\x61ll_metrics_per_category\x18! \x01(\x08:\x05\x66\x61lse\x12\x1d\n\x12recall_lower_bound\x18\x1a \x01(\x02:\x01\x30\x12\x1d\n\x12recall_upper_bound\x18\x1b \x01(\x02:\x01\x31\x12\x38\n)retain_original_image_additional_channels\x18\x1c \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x0f\x66orce_no_resize\x18\x1d \x01(\x08:\x05\x66\x61lse\x12%\n\x16use_dummy_loss_in_eval\x18\x1e \x01(\x08:\x05\x66\x61lse\x12<\n\rkeypoint_edge\x18 \x03(\x0b\x32%.object_detection.protos.KeypointEdge\"|\n\x13ParameterizedMetric\x12M\n\x15\x63oco_keypoint_metrics\x18\x01 \x01(\x0b\x32,.object_detection.protos.CocoKeypointMetricsH\x00\x42\x16\n\x14parameterized_metric\"\xd3\x01\n\x13\x43ocoKeypointMetrics\x12\x13\n\x0b\x63lass_label\x18\x01 \x01(\t\x12i\n\x18keypoint_label_to_sigmas\x18\x02 \x03(\x0b\x32G.object_detection.protos.CocoKeypointMetrics.KeypointLabelToSigmasEntry\x1a<\n\x1aKeypointLabelToSigmasEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\"*\n\x0cKeypointEdge\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05')
_EVALCONFIG = DESCRIPTOR.message_types_by_name['EvalConfig']
_PARAMETERIZEDMETRIC = DESCRIPTOR.message_types_by_name['ParameterizedMetric']
_COCOKEYPOINTMETRICS = DESCRIPTOR.message_types_by_name['CocoKeypointMetrics']
_COCOKEYPOINTMETRICS_KEYPOINTLABELTOSIGMASENTRY = _COCOKEYPOINTMETRICS.nested_types_by_name['KeypointLabelToSigmasEntry']
_KEYPOINTEDGE = DESCRIPTOR.message_types_by_name['KeypointEdge']
EvalConfig = _reflection.GeneratedProtocolMessageType('EvalConfig', (_message.Message,), {
'DESCRIPTOR' : _EVALCONFIG,
'__module__' : 'object_detection.protos.eval_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.EvalConfig)
})
_sym_db.RegisterMessage(EvalConfig)
ParameterizedMetric = _reflection.GeneratedProtocolMessageType('ParameterizedMetric', (_message.Message,), {
'DESCRIPTOR' : _PARAMETERIZEDMETRIC,
'__module__' : 'object_detection.protos.eval_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.ParameterizedMetric)
})
_sym_db.RegisterMessage(ParameterizedMetric)
CocoKeypointMetrics = _reflection.GeneratedProtocolMessageType('CocoKeypointMetrics', (_message.Message,), {
'KeypointLabelToSigmasEntry' : _reflection.GeneratedProtocolMessageType('KeypointLabelToSigmasEntry', (_message.Message,), {
'DESCRIPTOR' : _COCOKEYPOINTMETRICS_KEYPOINTLABELTOSIGMASENTRY,
'__module__' : 'object_detection.protos.eval_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.CocoKeypointMetrics.KeypointLabelToSigmasEntry)
})
,
'DESCRIPTOR' : _COCOKEYPOINTMETRICS,
'__module__' : 'object_detection.protos.eval_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.CocoKeypointMetrics)
})
_sym_db.RegisterMessage(CocoKeypointMetrics)
_sym_db.RegisterMessage(CocoKeypointMetrics.KeypointLabelToSigmasEntry)
KeypointEdge = _reflection.GeneratedProtocolMessageType('KeypointEdge', (_message.Message,), {
'DESCRIPTOR' : _KEYPOINTEDGE,
'__module__' : 'object_detection.protos.eval_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.KeypointEdge)
})
_sym_db.RegisterMessage(KeypointEdge)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_EVALCONFIG.fields_by_name['num_examples']._options = None
_EVALCONFIG.fields_by_name['num_examples']._serialized_options = b'\030\001'
_EVALCONFIG.fields_by_name['max_evals']._options = None
_EVALCONFIG.fields_by_name['max_evals']._serialized_options = b'\030\001'
_COCOKEYPOINTMETRICS_KEYPOINTLABELTOSIGMASENTRY._options = None
_COCOKEYPOINTMETRICS_KEYPOINTLABELTOSIGMASENTRY._serialized_options = b'8\001'
_EVALCONFIG._serialized_start=64
_EVALCONFIG._serialized_end=1193
_PARAMETERIZEDMETRIC._serialized_start=1195
_PARAMETERIZEDMETRIC._serialized_end=1319
_COCOKEYPOINTMETRICS._serialized_start=1322
_COCOKEYPOINTMETRICS._serialized_end=1533
_COCOKEYPOINTMETRICS_KEYPOINTLABELTOSIGMASENTRY._serialized_start=1473
_COCOKEYPOINTMETRICS_KEYPOINTLABELTOSIGMASENTRY._serialized_end=1533
_KEYPOINTEDGE._serialized_start=1535
_KEYPOINTEDGE._serialized_end=1577
# @@protoc_insertion_point(module_scope)
| 77.536585 | 2,698 | 0.815036 |
4a2715f1ff3840e45bb39b2efc50ed48e68f5a9a | 1,779 | py | Python | prepare_data.py | SETIADEEPANSHU/covid-jina-qa | 6514624eac7f32f8ff3b536281c6dcced760c9dd | [
"Apache-2.0"
] | 2 | 2020-09-21T17:21:21.000Z | 2021-04-09T10:20:26.000Z | prepare_data.py | SETIADEEPANSHU/covid-jina-qa | 6514624eac7f32f8ff3b536281c6dcced760c9dd | [
"Apache-2.0"
] | null | null | null | prepare_data.py | SETIADEEPANSHU/covid-jina-qa | 6514624eac7f32f8ff3b536281c6dcced760c9dd | [
"Apache-2.0"
] | null | null | null | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import csv
import os
import re
import sys
def read_data(data_fn, output_fn):
_min_sent_len = 3
_max_sent_len = 64
punct_chars = ['!', '.', '?', '։', '؟', '۔', '܀', '܁', '܂', '‼', '‽', '⁇', '⁈', '⁉', '⸮', '﹖', '﹗',
'!', '.', '?', '。', '。']
_slit_pat = re.compile('([{0}])+([^{0}])'.format(''.join(punct_chars)))
_replace_pat = re.compile('{}'.format(punct_chars))
if not os.path.exists(data_fn):
print('file not found: {}'.format(data_fn))
doc_list = []
character_set = set()
with open(data_fn, 'r') as f:
f_h = csv.reader(f)
for _idx, l in enumerate(f_h):
if _idx == 0:
continue
_, _, name, line = l
line = line.strip('"')
sents_str = _slit_pat.sub(r'\1\n\2', '{}\n'.format(line))
sents_str = sents_str.rstrip('\n')
sents = [s.strip() for s in sents_str.split('\n') if _min_sent_len <= len(s.strip()) <= _max_sent_len]
character_set.add(name)
name = _replace_pat.sub(r'', name)
for s in sents:
doc_list.append('{}[SEP]{}'.format(name, s))
doc_list = list(frozenset(doc_list))
print('some statistics about the data:')
print('\tnum characters: {}'.format(len(character_set)))
print('\tnum documents: {}'.format(len(doc_list)))
with open(output_fn, 'w') as f:
f.write('\n'.join(doc_list))
if __name__ == '__main__':
data_dir = sys.argv[1]
    # read_data() requires both an input and an output path; the output file name here is illustrative.
    read_data(
        os.path.join(data_dir, 'final_master_dataset.csv'),
        os.path.join(data_dir, 'character-lines.csv'))
# read_data(
# os.path.join(data_dir, 'All-seasons.csv'), os.path.join(data_dir, 'character-lines.csv'))
| 35.58 | 114 | 0.548623 |
4a2716a554928e44099dc2bff985ffaa41619e91 | 590 | py | Python | seldom-web-testing/test_dir/test_005_ddt.py | SeldomQA/seldom-platform | d165d33b586426669c537c89a0ff7d49d44c5b97 | [
"Apache-2.0"
] | 2 | 2022-02-12T15:02:04.000Z | 2022-02-26T12:40:03.000Z | seldom-web-testing/test_dir/test_005_ddt.py | SeldomQA/seldom-platform | d165d33b586426669c537c89a0ff7d49d44c5b97 | [
"Apache-2.0"
] | null | null | null | seldom-web-testing/test_dir/test_005_ddt.py | SeldomQA/seldom-platform | d165d33b586426669c537c89a0ff7d49d44c5b97 | [
"Apache-2.0"
] | 2 | 2022-02-22T02:45:49.000Z | 2022-03-18T12:32:32.000Z | import seldom
from seldom import data
class DDTTest(seldom.TestCase):
"""
    Data-driven tests
"""
@data([
('case1', 'seldom'),
('case2', 'selenium'),
('case3', 'unittest'),
])
def test_baidu(self, _, search_key):
"""
        Uses parameterized test data.
:param _: case name
:param search_key: search keyword
"""
self.open("https://www.baidu.com")
self.type(id_="kw", text=search_key)
self.click(css="#su")
self.assertInTitle(search_key)
if __name__ == '__main__':
seldom.main(debug=True)
| 20.344828 | 44 | 0.547458 |
4a2716c8f9157bcd67f7ef609f3b64f5212dc76d | 5,854 | py | Python | SoftLayer/managers/autoscale.py | ko101/softlayer-python | f4cc9fa2eb01d97c0e890907ef6735390f1a5b10 | [
"MIT"
] | null | null | null | SoftLayer/managers/autoscale.py | ko101/softlayer-python | f4cc9fa2eb01d97c0e890907ef6735390f1a5b10 | [
"MIT"
] | null | null | null | SoftLayer/managers/autoscale.py | ko101/softlayer-python | f4cc9fa2eb01d97c0e890907ef6735390f1a5b10 | [
"MIT"
] | null | null | null | """
SoftLayer.autoscale
~~~~~~~~~~~~~~~~~~~
Autoscale manager
:license: MIT, see LICENSE for more details.
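
Usage example (a minimal sketch; it assumes API credentials are available in the
environment and that the account has at least one scale group)::

    import SoftLayer
    from SoftLayer.managers.autoscale import AutoScaleManager

    client = SoftLayer.create_client_from_env()
    autoscale = AutoScaleManager(client)
    for group in autoscale.list():
        print(group['id'])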
"""
class AutoScaleManager(object):
"""Manager for interacting with Autoscale instances."""
def __init__(self, client):
self.client = client
def list(self, mask=None):
"""Calls `SoftLayer_Account::getScaleGroups()`_
:param mask: optional SoftLayer_Scale_Group objectMask
.. _SoftLayer_Account::getScaleGroups():
https://sldn.softlayer.com/reference/services/SoftLayer_Account/getScaleGroups/
"""
if not mask:
mask = "mask[status,virtualGuestMemberCount]"
return self.client.call('SoftLayer_Account', 'getScaleGroups', mask=mask, iter=True)
def details(self, identifier, mask=None):
"""Calls `SoftLayer_Scale_Group::getObject()`_
:param identifier: SoftLayer_Scale_Group id
:param mask: optional SoftLayer_Scale_Group objectMask
.. _SoftLayer_Scale_Group::getObject():
https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Group/getObject/
"""
if not mask:
mask = """mask[virtualGuestMembers[id,virtualGuest[id,hostname,domain,provisionDate]], terminationPolicy,
virtualGuestMemberCount, virtualGuestMemberTemplate[sshKeys],
policies[id,name,createDate,cooldown,actions,triggers,scaleActions],
networkVlans[networkVlanId,networkVlan[networkSpace,primaryRouter[hostname]]],
loadBalancers, regionalGroup[locations]]"""
return self.client.call('SoftLayer_Scale_Group', 'getObject', id=identifier, mask=mask)
def get_policy(self, identifier, mask=None):
"""Calls `SoftLayer_Scale_Policy::getObject()`_
:param identifier: SoftLayer_Scale_Policy id
:param mask: optional SoftLayer_Scale_Policy objectMask
.. _SoftLayer_Scale_Policy::getObject():
https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Policy/getObject/
"""
if not mask:
mask = """mask[cooldown, createDate, id, name, actions, triggers[type]
]"""
return self.client.call('SoftLayer_Scale_Policy', 'getObject', id=identifier, mask=mask)
def scale(self, identifier, amount):
"""Calls `SoftLayer_Scale_Group::scale()`_
:param identifier: SoftLayer_Scale_Group Id
:param amount: positive or negative number to scale the group by
.. _SoftLayer_Scale_Group::scale():
https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Group/scale/
"""
return self.client.call('SoftLayer_Scale_Group', 'scale', amount, id=identifier)
def scale_to(self, identifier, amount):
"""Calls `SoftLayer_Scale_Group::scaleTo()`_
:param identifier: SoftLayer_Scale_Group Id
:param amount: number to scale the group to.
.. _SoftLayer_Scale_Group::scaleTo():
https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Group/scaleTo/
"""
return self.client.call('SoftLayer_Scale_Group', 'scaleTo', amount, id=identifier)
def get_logs(self, identifier, mask=None, object_filter=None):
"""Calls `SoftLayer_Scale_Group::getLogs()`_
:param identifier: SoftLayer_Scale_Group Id
:param mask: optional SoftLayer_Scale_Group_Log objectMask
:param object_filter: optional SoftLayer_Scale_Group_Log objectFilter
.. _SoftLayer_Scale_Group::getLogs():
https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Group/getLogs/
"""
return self.client.call('SoftLayer_Scale_Group', 'getLogs', id=identifier, mask=mask, filter=object_filter,
iter=True)
def get_virtual_guests(self, identifier, mask=None):
"""Calls `SoftLayer_Scale_Group::getVirtualGuestMembers()`_
:param identifier: SoftLayer_Scale_Group Id
:param mask: optional SoftLayer_Scale_Member objectMask
.. _SoftLayer_Scale_Group::getVirtualGuestMembers():
https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Group/getVirtualGuestMembers/
"""
return self.client.call('SoftLayer_Scale_Group', 'getVirtualGuestMembers', id=identifier, mask=mask, iter=True)
def edit(self, identifier, template):
"""Calls `SoftLayer_Scale_Group::editObject()`_
:param identifier: SoftLayer_Scale_Group id
:param template: `SoftLayer_Scale_Group`_
.. _SoftLayer_Scale_Group::editObject():
https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Group/editObject/
.. _SoftLayer_Scale_Group: https://sldn.softlayer.com/reference/datatypes/SoftLayer_Scale_Group/
"""
return self.client.call('SoftLayer_Scale_Group', 'editObject', template, id=identifier)
def create(self, template):
"""Calls `SoftLayer_Scale_Group::createObject()`_
:param template: `SoftLayer_Scale_Group`_
.. _SoftLayer_Scale_Group::createObject():
https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Group/createObject/
.. _SoftLayer_Scale_Group: https://sldn.softlayer.com/reference/datatypes/SoftLayer_Scale_Group/
"""
return self.client.call('SoftLayer_Scale_Group', 'createObject', template)
def delete(self, identifier):
"""Calls `SoftLayer_Scale_Group::forceDeleteObject()`_
:param identifier: SoftLayer_Scale_Group id
.. _SoftLayer_Scale_Group::forceDeleteObject():
https://sldn.softlayer.com/reference/services/SoftLayer_Scale_Group/forceDeleteObject/
"""
return self.client.call('SoftLayer_Scale_Group', 'forceDeleteObject', id=identifier)
| 41.814286 | 119 | 0.678852 |
4a271879ce9bb21c65755cfce515838ff7c79c8c | 12,312 | py | Python | src/pygaps/characterisation/area_bet.py | pauliacomi/pyGAPS | c4d45b710e171c937471686437e382e05aec4ed5 | [
"MIT"
] | 35 | 2018-01-24T14:59:08.000Z | 2022-03-10T02:47:58.000Z | src/pygaps/characterisation/area_bet.py | pauliacomi/pyGAPS | c4d45b710e171c937471686437e382e05aec4ed5 | [
"MIT"
] | 29 | 2018-01-06T12:08:08.000Z | 2022-03-11T20:26:53.000Z | src/pygaps/characterisation/area_bet.py | pauliacomi/pyGAPS | c4d45b710e171c937471686437e382e05aec4ed5 | [
"MIT"
] | 20 | 2019-06-12T19:20:29.000Z | 2022-03-02T09:57:02.000Z | """This module contains BET area calculations."""
import logging
logger = logging.getLogger('pygaps')
import textwrap
import warnings
import numpy
from .. import scipy
from ..core.adsorbate import Adsorbate
from ..graphing.calc_graphs import bet_plot
from ..graphing.calc_graphs import roq_plot
from ..utilities.exceptions import CalculationError
from ..utilities.exceptions import ParameterError
from ..utilities.exceptions import pgError
def area_BET(isotherm, branch='ads', limits=None, verbose=False):
r"""
Calculate BET-determined surface area from an isotherm.
Pass an isotherm object to the function to have the BET method applied to it. Since
the function automatically takes the properties of the adsorbate from the master
list, ensure that it contains all the adsorbates which were used in the isotherms,
together with the properties required.
Parameters
----------
isotherm : PointIsotherm
The isotherm of which to calculate the BET surface area.
branch : {'ads', 'des'}, optional
Branch of the isotherm to use. It defaults to adsorption.
limits : [float, float], optional
Manual limits for region selection.
verbose : bool, optional
Prints extra information and plots graphs of the calculation.
Returns
-------
dict
A dictionary of results with the following components. The basis of these
results will be derived from the basis of the isotherm (per mass or per
volume of adsorbent):
- ``area`` (float) : calculated BET surface area, in m2/unit of adsorbent
- ``c_const`` (float) : the C constant in the BET equation, unitless
- ``n_monolayer`` (float) : the amount adsorbed at statistical monolayer, in mmol
- ``p_monolayer`` (float) : the pressure at which statistical monolayer is chosen, relative
- ``bet_slope`` (float) : slope of the BET plot
- ``bet_intercept`` (float) : intercept of the BET plot
- ``corr_coef`` (float) : correlation coefficient of the linear region in the BET plot
Notes
-----
*Description*
The BET method [#]_ is one of the first standardised methods to calculate the
surface area of a porous material. It is generally applied on isotherms obtained
through N2 adsorption at 77K, although other adsorbates (Ar, Kr) have been used.
It assumes that the adsorption happens on the surface of the material in
incremental layers according to the BET theory. Even if the adsorbent is porous,
the initial amount adsorbed (usually between 0.05 - 0.4 :math:`p/p_0`) can be
modelled through the BET equation:
.. math::
\frac{p/p_0}{n_{ads} (1-p/p_0)} = \frac{1}{n_{m} C} + \frac{C - 1}{n_{m} C}(p/p_0)
Therefore, if we plot the isotherm points as :math:`\frac{p/p_0}{n_{ads}(1-p/p_0)}` versus
:math:`p/p_0`, a linear region can usually be found. The slope and intercept of this line
can then be used to calculate :math:`n_{m}`, the amount adsorbed at the statistical
monolayer, as well as C, the BET constant.
.. math::
n_{m} = \frac{1}{s+i}
C = \frac{s}{i} + 1
The surface area can then be calculated by using the moles adsorbed at the statistical
monolayer. If the specific area taken by one of the adsorbate molecules on the surface
is known, it is inserted in the following equation together with Avogadro's number:
.. math::
a(BET) = n_m A_N \sigma
*Limitations*
While a standard for surface area determinations, the BET area should be used with care,
as there are many assumptions made in the calculation. To augment the validity of the BET
method, Rouquerol [#]_ proposed several checks to ensure that the BET region selected is valid
* The BET constant (C) obtained should be positive
* In the corresponding Rouquerol plot where :math:`n_{ads}(1-p/p_0)` is plotted
with respect to :math:`p/p_0`, the points chosen for BET analysis should be
strictly increasing
* The loading at the statistical monolayer should be situated within the
limits of the BET region
This module implements all these checks.
Regardless, the BET surface area should still be interpreted carefully. The following
assumptions are implicitly made in this approach:
* Adsorption takes place on the pore surface. Microporous materials which have pores
in similar size as the molecule adsorbed cannot posses a realistic surface area
* The cross-sectional area of the molecule on the surface cannot be guaranteed
For example, nitrogen has been known to adopt different orientations on the
surface of some materials due to inter-molecular forces, which effectively
lowers its cross-sectional area.
* No account is made for heterogeneous adsorbate-adsorbent interaction in the BET theory
References
----------
.. [#] “Adsorption of Gases in Multimolecular Layers”, Stephen Brunauer,
P. H. Emmett and Edward Teller, J. Amer. Chem. Soc., 60, 309(1938)
.. [#] "Adsorption by Powders & Porous Solids", F. Rouquerol, J Rouquerol
and K. Sing, Academic Press, 1999
"""
# get adsorbate properties
adsorbate = Adsorbate.find(isotherm.adsorbate)
cross_section = adsorbate.get_prop("cross_sectional_area")
# Read data in
loading = isotherm.loading(
branch=branch,
loading_unit='mol',
loading_basis='molar',
)
try:
pressure = isotherm.pressure(
branch=branch,
pressure_mode='relative',
)
except pgError:
raise CalculationError(
"The isotherm cannot be converted to a relative basis. "
"Is your isotherm supercritical?"
)
# use the bet function
(
bet_area, c_const, n_monolayer, p_monolayer, slope, intercept, minimum,
maximum, corr_coef
) = area_BET_raw(pressure, loading, cross_section, limits=limits)
if verbose:
logger.info(
textwrap.dedent(
f"""\
BET surface area: a = {bet_area:.2e} m2/{isotherm.material_unit}
Minimum pressure point is {pressure[minimum]:.3f} and maximum is {pressure[maximum -1]:.3f}
The slope of the BET fit: s = {slope:.2e}
The intercept of the BET fit: i = {intercept:.2e}
The BET constant is: C = {c_const:.1f}
Amount for a monolayer: n = {n_monolayer:.2e} mol/{isotherm.material_unit}"""
)
)
# Generate plot of the BET points chosen
bet_plot(
pressure, bet_transform(pressure, loading), minimum, maximum,
slope, intercept, p_monolayer,
bet_transform(p_monolayer, n_monolayer)
)
# Generate plot of the Rouquerol points chosen
roq_plot(
pressure, roq_transform(pressure, loading), minimum, maximum,
p_monolayer, roq_transform(p_monolayer, n_monolayer)
)
return {
'area': bet_area,
'c_const': c_const,
'n_monolayer': n_monolayer,
'p_monolayer': p_monolayer,
'bet_slope': slope,
'bet_intercept': intercept,
'corr_coef': corr_coef,
'limits': [minimum, maximum]
}
def area_BET_raw(pressure, loading, cross_section, limits=None):
"""
Calculate BET-determined surface area.
This is a 'bare-bones' function to calculate BET surface area which is
designed as a low-level alternative to the main function.
Designed for advanced use, its parameters have to be manually specified.
Parameters
----------
pressure : array
Pressures, relative.
loading : array
Loadings, in mol/basis.
cross_section : float
        Adsorbed cross-sectional area of the adsorbate molecule, in nm^2.
limits : [float, float], optional
Manual limits for region selection.
Returns
-------
area : float
Calculated BET surface area.
c_const : float
C constant from the BET equation.
n_monolayer : float
Adsorbed quantity in the statistical monolayer.
p_monolayer : float
Pressure at the statistical monolayer.
slope : float
Calculated slope of the BET plot.
intercept : float
Calculated intercept of the BET plot.
minimum : float
Minimum point taken for the linear region.
maximum : float
Maximum point taken for the linear region.
corr_coef : float
Correlation coefficient of the straight line in the BET plot.
"""
# Check lengths
if len(pressure) != len(loading):
raise ParameterError(
"The length of the pressure and loading arrays do not match."
)
# Ensure numpy arrays, if not already
loading = numpy.asarray(loading)
pressure = numpy.asarray(pressure)
# select the maximum and minimum of the points and the pressure associated
maximum = len(pressure)
minimum = 0
if limits is None:
# Generate the Rouquerol array
roq_t_array = roq_transform(pressure, loading)
# Find place where array starts decreasing
# If none is found, maximum will be left as-is
for index, value in enumerate(roq_t_array):
if value > roq_t_array[index + 1]:
maximum = index + 1
break
# Min pressure is initially taken as 10% of max
min_p = pressure[maximum] / 10
minimum = numpy.searchsorted(pressure, min_p)
# Try to extend if not enough points
if maximum - minimum < 3: # (for 3 point minimum)
if maximum > 2: # Can only extend if enough points available
minimum = maximum - 3
else:
raise CalculationError(
"The isotherm does not have enough points (at least 3) "
"in the BET region. Unable to calculate BET area."
)
else:
# Determine the limits
if limits[1]:
maximum = numpy.searchsorted(pressure, limits[1])
if limits[0]:
minimum = numpy.searchsorted(pressure, limits[0])
if maximum - minimum < 3: # (for 3 point minimum)
raise CalculationError(
"The isotherm does not have enough points (at least 3) "
"in the BET region. Unable to calculate BET area."
)
# calculate the BET transform, slope and intercept
bet_t_array = bet_transform(
pressure[minimum:maximum], loading[minimum:maximum]
)
slope, intercept, corr_coef = bet_optimisation(
pressure[minimum:maximum], bet_t_array
)
# calculate the BET parameters
n_monolayer, p_monolayer, c_const, bet_area = bet_parameters(
slope, intercept, cross_section
)
# Checks for consistency
if c_const < 0:
warnings.warn("The C constant is negative.")
if corr_coef < 0.99:
warnings.warn("The correlation is not linear.")
if not (loading[minimum] < n_monolayer < loading[maximum - 1]):
warnings.warn("The monolayer point is not within the BET region")
return (
bet_area,
c_const,
n_monolayer,
p_monolayer,
slope,
intercept,
minimum,
maximum,
corr_coef,
)
def roq_transform(pressure, loading):
"""Rouquerol transform function."""
return loading * (1 - pressure)
def bet_transform(pressure, loading):
"""BET transform function."""
return pressure / roq_transform(pressure, loading)
def bet_optimisation(pressure, bet_points):
"""Find the slope and intercept of the BET region."""
slope, intercept, corr_coef, p, stderr = scipy.stats.linregress(
pressure, bet_points
)
return slope, intercept, corr_coef
def bet_parameters(slope, intercept, cross_section):
"""Calculate the BET parameters from the slope and intercept."""
c_const = (slope / intercept) + 1
n_monolayer = 1 / (intercept * c_const)
p_monolayer = 1 / (numpy.sqrt(c_const) + 1)
bet_area = n_monolayer * cross_section * (10**(-18)) * scipy.const.Avogadro
return n_monolayer, p_monolayer, c_const, bet_area
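# Illustrative sketch (not part of the original module): a hedged, minimal
# demonstration of the docstring formulas C = s/i + 1 and a(BET) = n_m A_N sigma
# via the helper above. The slope, intercept and cross-section values are made
# up purely for illustration (a nitrogen-like cross-section of 0.162 nm^2).
def _example_bet_parameters():
    slope, intercept, cross_section = 230.0, 2.5, 0.162
    n_monolayer, p_monolayer, c_const, bet_area = bet_parameters(
        slope, intercept, cross_section
    )
    # c_const equals slope / intercept + 1; bet_area is returned in m2/basis.
    return n_monolayer, p_monolayer, c_const, bet_area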
| 35.583815 | 103 | 0.651803 |
4a27189b207858859eee17a5f016c4f7c9b8310d | 663 | py | Python | tasks/__init__.py | kuwv/spades | 9c36eff2c8fe2e4acc69a317d0c58bb8e1c2373f | [
"Apache-2.0"
] | null | null | null | tasks/__init__.py | kuwv/spades | 9c36eff2c8fe2e4acc69a317d0c58bb8e1c2373f | [
"Apache-2.0"
] | null | null | null | tasks/__init__.py | kuwv/spades | 9c36eff2c8fe2e4acc69a317d0c58bb8e1c2373f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# type: ignore
# copyright: (c) 2020 by Jesse Johnson.
# license: Apache 2.0, see LICENSE for more details.
'''Initialize project management tasks.'''
from invoke import Collection
from . import (
compose, docs, webui, webapp, build, certs, qa, setup
)
ns = Collection().from_module(setup)
ns.add_collection(Collection.from_module(build))
ns.add_collection(Collection.from_module(compose))
ns.add_collection(Collection.from_module(certs))
ns.add_collection(Collection.from_module(webapp))
ns.add_collection(Collection.from_module(webui))
ns.add_collection(Collection.from_module(docs))
ns.add_collection(Collection.from_module(qa))
| 31.571429 | 57 | 0.776772 |
4a2718ba98a65e72f3c9cf446c73d7552e3b0d78 | 1,706 | py | Python | eval/tests/dp_multikey.py | AprilXiaoyanLiu/whitenoise-system | 0e94d2cc8114b97a61d5d2e45278428f91f1e687 | [
"MIT"
] | 63 | 2020-03-26T15:26:10.000Z | 2020-10-22T06:26:38.000Z | eval/tests/dp_multikey.py | AprilXiaoyanLiu/whitenoise-system | 0e94d2cc8114b97a61d5d2e45278428f91f1e687 | [
"MIT"
] | 87 | 2021-02-20T20:43:49.000Z | 2022-03-31T16:24:46.000Z | eval/tests/dp_multikey.py | AprilXiaoyanLiu/whitenoise-system | 0e94d2cc8114b97a61d5d2e45278428f91f1e687 | [
"MIT"
] | 17 | 2021-02-18T18:47:09.000Z | 2022-03-01T06:44:17.000Z | from sneval.params._privacy_params import PrivacyParams
from sneval.params._eval_params import EvaluatorParams
from sneval.report._report import Report
from sneval.privacyalgorithm._base import PrivacyAlgorithm
from snsql.sql import PrivateReader
class DPMultiKey(PrivacyAlgorithm):
"""
Sample implementation of PrivacyAlgorithm Interface
that allows for the library to be stochastically tested by
evaluator.
"""
def prepare(self, algorithm : object, privacy_params: PrivacyParams, eval_params: EvaluatorParams):
"""
Load the algorithm (in this case SQL aggregation query) to be used for acting on the dataset
Initialize the privacy params that need to be used by the function
for calculating differentially private noise
"""
self.algorithm = algorithm
self.privacy_params = privacy_params
self.eval_params = eval_params
def release(self, dataset: object) -> Report:
"""
Dataset is Pandas Dataframe with multiple columns and we need to sum
elements in each column and assign a key (column name) for each column.
Releases count per key based on the number of repetitions
        requested by eval_params if actual is set to False.
Actual response is only returned once
"""
noisy_res = self.algorithm(dataset, self.privacy_params, self.eval_params)
return Report(noisy_res)
def actual_release(self, dataset: object) -> Report:
"""
Returns exact non-private response from algorithm
"""
actual_res = self.algorithm(dataset, self.privacy_params, self.eval_params, actual = True)
return Report(actual_res) | 43.74359 | 103 | 0.711606 |
4a2718fc0a0af2effde53999c492bd1652fc026b | 1,117 | py | Python | Chapter08/background_substraction_GMG.py | debojyoti007/OpenCV | 0096a6d0b6e9e4d5abc473c25def075403ccb0f2 | [
"MIT"
] | 105 | 2018-03-12T11:40:56.000Z | 2022-02-24T18:38:30.000Z | Chapter08/background_substraction_GMG.py | abelZW/OpenCV-3-x-with-Python-By-Example | 6810e2242ce7c9ea5e492d4d951c45cc99782785 | [
"MIT"
] | 1 | 2019-12-17T14:26:56.000Z | 2021-01-05T14:07:01.000Z | Chapter08/background_substraction_GMG.py | abelZW/OpenCV-3-x-with-Python-By-Example | 6810e2242ce7c9ea5e492d4d951c45cc99782785 | [
"MIT"
] | 97 | 2018-01-17T01:35:43.000Z | 2022-03-16T07:37:28.000Z | import cv2
import numpy as np
# Capture the input frame
def get_frame(cap, scaling_factor=0.5):
ret, frame = cap.read()
# Resize the frame
frame = cv2.resize(frame, None, fx=scaling_factor,
fy=scaling_factor, interpolation=cv2.INTER_AREA)
return frame
if __name__=='__main__':
# Initialize the video capture object
cap = cv2.VideoCapture(1)
# Create the background subtractor object
bgSubtractor= cv2.bgsegm.createBackgroundSubtractorGMG()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, ksize=(3,3))
# Iterate until the user presses the ESC key
while True:
frame = get_frame(cap, 0.5)
# Apply the background subtraction model to the input frame
mask = bgSubtractor.apply(frame)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
cv2.imshow('Input frame', frame)
cv2.imshow('Moving Objects', mask)
# Check if the user pressed the ESC key
c = cv2.waitKey(delay=30)
if c == 27:
break
cap.release()
cv2.destroyAllWindows() | 28.641026 | 70 | 0.644584 |
4a27192198783950225c3cbde02318b6cd58555f | 1,851 | py | Python | backend/api/fixtures/test/0005_add_bceid_user_roles.py | kuanfan99/zeva | 57b506a108fe57438506569d5503c90c52216b2f | [
"Apache-2.0"
] | 3 | 2020-03-25T03:06:20.000Z | 2021-01-20T23:36:03.000Z | backend/api/fixtures/test/0005_add_bceid_user_roles.py | kuanfan99/zeva | 57b506a108fe57438506569d5503c90c52216b2f | [
"Apache-2.0"
] | 740 | 2019-12-16T15:53:39.000Z | 2022-03-26T08:25:10.000Z | backend/api/fixtures/test/0005_add_bceid_user_roles.py | kuanfan99/zeva | 57b506a108fe57438506569d5503c90c52216b2f | [
"Apache-2.0"
] | 11 | 2019-11-28T20:39:15.000Z | 2022-01-31T17:53:31.000Z | from django.db import transaction
from api.management.data_script import OperationalDataScript
from api.models.role import Role
from api.models.user_profile import UserProfile
from api.models.user_role import UserRole
class AddBCEIDUserRoles(OperationalDataScript):
"""
Gives our test accounts (bceid) user roles so we can do something in the
system
"""
is_revertable = False
comment = 'Adds bceid user roles'
list_of_users = [
"ARING_BCEID",
"EMHILLIE_BCEID",
"JADONALD_BCEID",
"KMENKE_BCEID",
"KLEFLER_BCEID",
"KFAN_BCEID",
"RTAN_BCEID"
]
list_of_roles = [
"Organization Administrator", "Signing Authority", "ZEVA User"
]
def check_run_preconditions(self):
for username in self.list_of_users:
if UserRole.objects.filter(
user_profile__username=username
).exists():
return False
return True
@transaction.atomic
def run(self):
user_roles_added = 0
for username in self.list_of_users:
user_profile = UserProfile.objects.filter(
username=username
).first()
if user_profile is None:
continue
for role_code in self.list_of_roles:
role = Role.objects.get(
role_code=role_code
)
(_, created) = UserRole.objects.get_or_create(
role=role,
user_profile=user_profile,
defaults={
'create_user': 'SYSTEM'
}
)
if created:
user_roles_added += 1
print("Added {} user roles.".format(user_roles_added))
script_class = AddBCEIDUserRoles
| 25.708333 | 76 | 0.568341 |
4a271936fb86ca991973af467aec333b367b6dd4 | 1,004 | py | Python | RumourEval2019Models/Bert-MFajcik/neural_bag/embedders.py | isspek/veracity-detection | 9368309722bead209e49e52c206758e3d173092a | [
"MIT"
] | null | null | null | RumourEval2019Models/Bert-MFajcik/neural_bag/embedders.py | isspek/veracity-detection | 9368309722bead209e49e52c206758e3d173092a | [
"MIT"
] | null | null | null | RumourEval2019Models/Bert-MFajcik/neural_bag/embedders.py | isspek/veracity-detection | 9368309722bead209e49e52c206758e3d173092a | [
"MIT"
] | null | null | null | import logging
import torch
class Embedder(torch.nn.Module):
def __init__(self, vocab, config):
super().__init__()
self.scale_grad = config['scale_emb_grad_by_freq']
self.init_vocab(vocab, config['optimize_embeddings'])
logging.info(f"Optimize embeddings = {config['optimize_embeddings']}")
logging.info(f"Scale grad by freq: {self.scale_grad}")
logging.info(f"Vocabulary size = {len(vocab.vectors)}")
def init_vocab(self, vocab, optimize_embeddings=False, device=None):
self.embedding_dim = vocab.vectors.shape[1]
self.embeddings = torch.nn.Embedding(len(vocab), self.embedding_dim, scale_grad_by_freq=self.scale_grad)
self.embeddings.weight.data.copy_(vocab.vectors)
self.embeddings.weight.requires_grad = optimize_embeddings
self.vocab = vocab
if device is not None:
self.embeddings = self.embeddings.to(device)
def forward(self, input):
return self.embeddings(input)
| 38.615385 | 112 | 0.691235 |
4a27194ab2c36f4bae6ebf331f7efa8ddf65687b | 5,727 | py | Python | distance_analysis.py | TorHou/ContigAnalysisScripts | 7cd240babf6c8889a25604d3cfb262e241fcfa11 | [
"MIT"
] | 1 | 2018-07-27T00:54:35.000Z | 2018-07-27T00:54:35.000Z | distance_analysis.py | DiltheyLab/ContigAnalysisScripts | 7cd240babf6c8889a25604d3cfb262e241fcfa11 | [
"MIT"
] | null | null | null | distance_analysis.py | DiltheyLab/ContigAnalysisScripts | 7cd240babf6c8889a25604d3cfb262e241fcfa11 | [
"MIT"
] | null | null | null | from argparse import ArgumentParser
from operator import itemgetter
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sys
from itertools import combinations
from scaffold import Longreads
parser = ArgumentParser()
parser.add_argument("inputfiles", help="Input Files in Error-Rate or PAF format", nargs="+")
parser.add_argument("summaryfile", help="Contig distance summary file")
parser.add_argument("linename", help="Name of cell line")
parser.add_argument("--blacklistfile", help="File containing long read ids where certain contig mappings should be ignored.")
parser.add_argument("--include_ambigious", help="Include ambigious contigs", action="store_true", default=False)
args = parser.parse_args()
reads = {}
greads = {}
cgreads = []
ambigious_contigs = set()
blacklist = {}
blacklist_fullread = set()
blacklist_contigs = set()
if args.blacklistfile:
with open(args.blacklistfile) as f:
for line in f:
sline = line.split()
if sline[0] == "contig":
blacklist_contigs.add(sline[1])
if sline[1] == "all":
blacklist_fullread.add(sline[0])
else:
blacklist[sline[0]] = sline[1]
lrs = Longreads(args.inputfiles, blacklist, args.linename)
lrs.filter_contigcounts(2)
lrs.turn_longreads_around()
lrs.sort_contigs_in_reads()
lrs = lrs.lreads
#print(greads)
distances = {}
# get distances of all neighbouring overlaps
'''
for rid in greads:
oviter = iter(greads[rid]["overlaps"])
#print(ov)
try:
ovold = next(oviter)
if ovold["contig"].startswith("chr"):
continue
except StopIteration:
continue
while True:
try:
ovnew = next(oviter)
if ovnew["contig"].startswith("chr"):
continue
except StopIteration:
break
#print("distance between " + ovold["contig"] + " and " + ovnew["contig"] + ": " + str(ovnew["scr"] - ovold["ecr"]- ovold["lc"] + ovold["ecc"] - ovnew["scc"]))
if ovnew["contig"] == ovold["contig"]:
continue
if ovnew["strand"] == ovold["strand"]:
distance = ovnew["scr"] - ovold["ecr"]- (ovold["lc"] - ovold["ecc"]) - ovnew["scc"] + 1
else:
continue
if int(ovold["contig"].rstrip(args.linename)) < int(ovnew["contig"].rstrip(args.linename)):
cstring = ovold["contig"] + "_" + ovnew["contig"]
else:
cstring = ovnew["contig"] + "_" + ovold["contig"]
if cstring in distances:
distances[cstring].append(distance)
else:
distances[cstring] = [distance]
ovold = ovnew
'''
# get distances of all overlaps
for rid in lrs:
for item in combinations(lrs[rid]['maps'], 2):
ovold, ovnew = item
if ovnew["name"].startswith("chr") or ovold["name"].startswith("chr"):
continue
if ovnew["name"] in ambigious_contigs or ovold["name"] in ambigious_contigs:
continue
if "_" in ovnew["name"] or "_" in ovold["name"]:
continue
if ovnew["name"] == ovold["name"]:
continue
if ovnew["strand"] == 1 or ovold["strand"] == 1:
continue
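        # Added note (hedged reading of the fields used below): scr/ecr appear
        # to be the start/end of the mapping on the long read, and scc/ecc/lenc
        # the mapped interval and length on the contig, so the gap is the read
        # distance between the two mappings minus the unmapped contig tails
        # (lenc - ecc of the left contig and scc of the right one).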
distance = ovnew["scr"] - ovold["ecr"]- (ovold["lenc"] - ovold["ecc"]) - ovnew["scc"] + 1
cstring = ovold["name"] + "_" + ovnew["name"]
if cstring in distances:
distances[cstring].append(distance)
else:
distances[cstring] = [distance]
#print(combo)
for key, value in distances.items():
#print(str(key) + " " + str(value))
if len(value)>1:
#print(str(key) + "\t" + str(value))
pass
distances2 = {}
with open(args.summaryfile) as f:
for line in f:
sline = line.split()
ctg1 = sline[0].split("_")[0].strip("+").strip("-")
ctg2 = sline[0].split("_")[1].strip("+").strip("-")
if line.startswith("-"):
continue
if sline[1] == "NA":
continue
if float(sline[4]) > 2:
continue
moddist = float(sline[1])
#if int(ctg1.rstrip(args.linename)) < int(ctg2.rstrip(args.linename)):
cstr = ctg1+"_"+ctg2
#else:
# cstr = ctg2+"_"+ctg1
if cstr in distances2:
if abs(moddist) < abs(distances2[cstr]):
distances2[cstr] = moddist
else:
distances2[cstr] = moddist
for name, dist in distances.items():
if name in distances2:
dist2 = distances2[name]
else:
dist2 = "-"
name1, name2 = name.split("_")
print("\t".join([name1, name2, str(dist), str(dist2)]))
df = pd.DataFrame.from_dict([distances, distances2])
#df.rename(index=
dc = df.T.rename(columns={0:'longread',1:'shortread'})
dc["longread_mean"] = dc.longread.apply(np.mean)
#dc['longread'] = np.mean(dc.longread)
dd = dc.dropna()
#get interesting differences
#print(dd[abs(dd['longread_mean'] - dd['shortread']) > 150])
#print(dd)
sthsth = []
for item in dd['longread_mean']:
sthsth.append(item < 0)
for idx, item in enumerate(dd['shortread']):
sthsth[idx] = sthsth[idx] or item < 0
#for name in dd[sthsth].index.values:
# print(name)
#print(dd[dd['longread_mean'] <= -20])
#print(dd.index.values)
plt.scatter(dd['longread_mean'], dd['shortread'],s= 6, alpha = 0.7)
plt.xlabel("Long Read Distances (mean: " + "{:.3f}".format(np.mean(dd['longread_mean'])) + ")")
#plt.xlabel("Long Read Distances")
plt.ylabel("Short Read Distances (mean: " + "{:.3f}".format(np.mean(dd['shortread'])) + ")")
#plt.ylabel("Short Read Distances")
plt.savefig('distances_scatter.pdf')
| 31.994413 | 166 | 0.590012 |
4a27195ede0e991b305e9239724db16570e831af | 1,410 | py | Python | app/test/test_config.py | Shulyakovskiy/flask-restplus-jwtext | 76cde85aa2f2b97912520c5f4dc810fe9fa2377a | [
"MIT"
] | null | null | null | app/test/test_config.py | Shulyakovskiy/flask-restplus-jwtext | 76cde85aa2f2b97912520c5f4dc810fe9fa2377a | [
"MIT"
] | null | null | null | app/test/test_config.py | Shulyakovskiy/flask-restplus-jwtext | 76cde85aa2f2b97912520c5f4dc810fe9fa2377a | [
"MIT"
] | null | null | null | import os
import unittest
from flask import current_app
from flask_testing import TestCase
from app.main.config import basedir
from server import app
class TestDevelopmentConfig(TestCase):
def create_app(self):
app.config.from_object('app.main.config.DevelopmentConfig')
return app
def test_app_is_development(self):
        self.assertFalse(app.config['SECRET_KEY'] == 'my_precious')
self.assertTrue(app.config['DEBUG'] is True)
self.assertFalse(current_app is None)
self.assertTrue(
app.config['SQLALCHEMY_DATABASE_URI'] == 'sqlite:///' + os.path.join(basedir, 'flask_boilerplate_main.db')
)
class TestTestingConfig(TestCase):
def create_app(self):
app.config.from_object('app.main.config.TestingConfig')
return app
def test_app_is_testing(self):
        self.assertFalse(app.config['SECRET_KEY'] == 'my_precious')
self.assertTrue(app.config['DEBUG'])
self.assertTrue(
app.config['SQLALCHEMY_DATABASE_URI'] == 'sqlite:///' + os.path.join(basedir, 'flask_boilerplate_test.db')
)
class TestProductionConfig(TestCase):
def create_app(self):
app.config.from_object('app.main.config.ProductionConfig')
return app
def test_app_is_production(self):
self.assertTrue(app.config['DEBUG'] is False)
if __name__ == '__main__':
unittest.main()
| 28.77551 | 118 | 0.689362 |
4a2719acaa06b1b3b04da43217d4f79e6a3933e0 | 1,449 | py | Python | Unit test/test_HornSchunk.py | khaledsabry97/Argus | c794f6e46ec529a836db127dfdb33b3161cf79ee | [
"MIT"
] | 9 | 2021-01-09T17:04:18.000Z | 2022-03-24T11:26:00.000Z | Unit test/test_HornSchunk.py | khaledsabry97/Argus | c794f6e46ec529a836db127dfdb33b3161cf79ee | [
"MIT"
] | 4 | 2021-03-21T21:40:06.000Z | 2022-03-12T00:53:55.000Z | Unit test/test_HornSchunk.py | khaledsabry97/Argus | c794f6e46ec529a836db127dfdb33b3161cf79ee | [
"MIT"
] | 3 | 2021-03-13T07:39:19.000Z | 2022-01-28T23:00:51.000Z | import cv2
from VIF.HornSchunck import HornSchunck
"""
Unit tests for the HornSchunck class.
"""
def test_hs_init():
"""
Verify that object is created successfully
"""
try:
hs = HornSchunck()
assert 1
except:
assert 0
def test_hs_derivatives():
"""
Verify the results of differentiating two frames
"""
try:
frame1 = cv2.imread('Unit test/f0.jpg')
frame2 = cv2.imread('Unit test/f1.jpg')
shape = (134, 100)
frame1 = cv2.resize(frame1, shape)
frame2 = cv2.resize(frame2, shape)
frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
hs = HornSchunck()
x, y, t = hs.derivatives(frame1, frame2)
assert x.any() != 0 and y.any() != 0 and t.any() != 0
except:
assert 0
def test_hs_process():
"""
Verify the results of processing two frames
"""
try:
frame1 = cv2.imread('Unit test/f0.jpg')
frame2 = cv2.imread('Unit test/f1.jpg')
shape = (134, 100)
frame1 = cv2.resize(frame1, shape)
frame2 = cv2.resize(frame2, shape)
frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
hs = HornSchunck()
H, V, M = hs.process(frame1, frame2)
assert H.any() != 0 and V.any() != 0 and M.any() != 0
except:
assert 0
| 22.640625 | 60 | 0.57902 |
4a271a5f19adda163130d8bab44dae5574f77166 | 501 | py | Python | tests/test_news.py | Bernard2030/news-app | c5c7f2b04cc4a35709a81747ad8449d91921d655 | [
"Unlicense"
] | null | null | null | tests/test_news.py | Bernard2030/news-app | c5c7f2b04cc4a35709a81747ad8449d91921d655 | [
"Unlicense"
] | null | null | null | tests/test_news.py | Bernard2030/news-app | c5c7f2b04cc4a35709a81747ad8449d91921d655 | [
"Unlicense"
] | null | null | null | import unittest
from app.models import News
# News = news.News
class NewsTest(unittest.TestCase):
"""
Test class to test the behavior of the News class.
"""
def setUp(self):
"""
set up method that will run before every Test.
"""
self.current_news = News('A thrilling news api from python','')
def test_instance(self):
self.assertTrue(isinstance(self.current_news, News))
if __name__ == '__main__':
unittest.main()
| 25.05 | 71 | 0.61477 |
4a271a6ebcca22d00616dd50585ef775cf6ed2b3 | 52,771 | py | Python | pymatgen/analysis/defects/point_defects.py | rousseab/pymatgen | ecfba4a576a21f31c222be8fd20ce2ddaa77495a | [
"MIT"
] | 1 | 2015-05-18T14:31:20.000Z | 2015-05-18T14:31:20.000Z | pymatgen/analysis/defects/point_defects.py | rousseab/pymatgen | ecfba4a576a21f31c222be8fd20ce2ddaa77495a | [
"MIT"
] | null | null | null | pymatgen/analysis/defects/point_defects.py | rousseab/pymatgen | ecfba4a576a21f31c222be8fd20ce2ddaa77495a | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import division, unicode_literals
"""
This module defines classes for point defects
"""
import os
import abc
import json
from bisect import bisect_left
from pymatgen.core.periodic_table import Specie, Element
from pymatgen.core.sites import PeriodicSite
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.io.zeo import get_voronoi_nodes, get_void_volume_surfarea, \
get_high_accuracy_voronoi_nodes
from pymatgen.command_line.gulp_caller import get_energy_buckingham, \
get_energy_relax_structure_buckingham
from pymatgen.analysis.structure_analyzer import VoronoiCoordFinder, \
RelaxationAnalyzer
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.analysis.bond_valence import BVAnalyzer
import six
from six.moves import filter
from six.moves import map
from six.moves import zip
file_dir = os.path.dirname(__file__)
rad_file = os.path.join(file_dir, 'ionic_radii.json')
with open(rad_file, 'r') as fp:
_ion_radii = json.load(fp)
class ValenceIonicRadiusEvaluator(object):
"""
Computes site valences and ionic radii for a structure using bond valence
analyzer
Args:
structure: pymatgen.core.structure.Structure
"""
def __init__(self, structure):
self._structure = structure.copy()
self._valences = self._get_valences()
self._ionic_radii = self._get_ionic_radii()
@property
def radii(self):
"""
List of ionic radii of elements in the order of sites.
"""
el = [site.species_string for site in self._structure.sites]
radii_dict = dict(zip(el, self._ionic_radii))
#print radii_dict
return radii_dict
@property
def valences(self):
"""
List of oxidation states of elements in the order of sites.
"""
el = [site.species_string for site in self._structure.sites]
valence_dict = dict(zip(el, self._valences))
return valence_dict
@property
def structure(self):
"""
        Returns the oxidation-state-decorated structure.
"""
return self._structure.copy()
def _get_ionic_radii(self):
"""
Computes ionic radii of elements for all sites in the structure.
If valence is zero, atomic radius is used.
"""
radii = []
coord_finder = VoronoiCoordFinder(self._structure)
def nearest_key(sorted_vals, key):
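            # Helper (comment added): return the entry of the sorted list that
            # is numerically closest to `key`, so the nearest tabulated
            # oxidation state or coordination number can be looked up.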
i = bisect_left(sorted_vals, key)
if i == len(sorted_vals):
return sorted_vals[-1]
if i == 0:
return sorted_vals[0]
before = sorted_vals[i-1]
after = sorted_vals[i]
if after-key < key-before:
return after
else:
return before
for i in range(len(self._structure.sites)):
site = self._structure.sites[i]
if isinstance(site.specie,Element):
radius = site.specie.atomic_radius
radii.append(radius)
continue
el = site.specie.symbol
oxi_state = int(round(site.specie.oxi_state))
coord_no = int(round(coord_finder.get_coordination_number(i)))
try:
tab_oxi_states = sorted(map(int, _ion_radii[el].keys()))
oxi_state = nearest_key(tab_oxi_states, oxi_state)
radius = _ion_radii[el][str(oxi_state)][str(coord_no)]
except KeyError:
if coord_finder.get_coordination_number(i)-coord_no > 0:
new_coord_no = coord_no + 1
else:
new_coord_no = coord_no - 1
try:
radius = _ion_radii[el][str(oxi_state)][str(new_coord_no)]
coord_no = new_coord_no
except:
tab_coords = sorted(map(int, _ion_radii[el][str(oxi_state)].keys()))
new_coord_no = nearest_key(tab_coords, coord_no)
i = 0
for val in tab_coords:
if val > coord_no:
break
i = i + 1
if i == len(tab_coords):
key = str(tab_coords[-1])
radius = _ion_radii[el][str(oxi_state)][key]
elif i == 0:
key = str(tab_coords[0])
radius = _ion_radii[el][str(oxi_state)][key]
else:
key = str(tab_coords[i-1])
radius1 = _ion_radii[el][str(oxi_state)][key]
key = str(tab_coords[i])
radius2 = _ion_radii[el][str(oxi_state)][key]
radius = (radius1+radius2)/2
#implement complex checks later
radii.append(radius)
return radii
def _get_valences(self):
"""
Computes ionic valences of elements for all sites in the structure.
"""
try:
bv = BVAnalyzer()
self._structure = bv.get_oxi_state_decorated_structure(self._structure)
valences = bv.get_valences(self._structure)
except:
try:
bv = BVAnalyzer(symm_tol=0.0)
self._structure = bv.get_oxi_state_decorated_structure(self._structure)
valences = bv.get_valences(self._structure)
except:
valences = []
for site in self._structure.sites:
valences.append(site.specie.common_oxidation_states[0])
if sum(valences):
valences = [0]*self._structure.num_sites
else:
self._structure.add_oxidation_state_by_site(valences)
#raise
#el = [site.specie.symbol for site in self._structure.sites]
#el = [site.species_string for site in self._structure.sites]
#el = [site.specie for site in self._structure.sites]
#valence_dict = dict(zip(el, valences))
#print valence_dict
return valences
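# Illustrative sketch (not part of the original API): hedged example of how the
# evaluator above is typically combined with the defect classes below. The
# input "structure" is any caller-supplied pymatgen Structure without
# oxidation-state decoration.
def _example_valence_radius_lookup(structure):
    evaluator = ValenceIonicRadiusEvaluator(structure)
    # Both dictionaries are keyed by the site species string of the
    # oxidation-state-decorated structure, e.g. "O2-".
    return evaluator.valences, evaluator.radii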
class Defect(six.with_metaclass(abc.ABCMeta, object)):
"""
Abstract class for point defects
"""
@abc.abstractmethod
def enumerate_defectsites(self):
"""
Enumerates all the symmetrically distinct defects.
"""
raise NotImplementedError()
@property
def structure(self):
"""
Returns the structure without any defects
Useful for Mott-Littleton calculations.
"""
return self._structure
@property
def struct_radii(self):
"""
Radii of elements in the structure
"""
return self._rad_dict
@property
def struct_valences(self):
"""
Valence of elements in the structure
"""
return self._valence_dict
def defectsite_count(self):
"""
Returns the number of symmetrically distinct defect sites
"""
return len(self._defect_sites)
def get_defectsite(self, n):
"""
Returns the defect site at the index.
"""
return self._defect_sites[n]
def get_defectsite_multiplicity(self, n):
"""
        Returns the symmetric multiplicity of the defect site at the index.
"""
return self._defect_site_multiplicity[n]
def get_defectsite_coordination_number(self, n):
"""
Coordination number of interstitial site.
Args:
n: Index of interstitial list
"""
return self._defectsite_coord_no[n]
def get_coordinated_sites(self, n):
"""
The sites in structure surrounding the defect site.
Args:
n: Index of defects list
"""
return self._defect_coord_sites[n]
def get_coordinated_elements(self, n):
"""
Elements of sites in structure surrounding the defect site.
Args:
n: Index of defect list
"""
coordinated_species = []
for site in self._defect_coord_sites[n]:
coordinated_species.append(site.specie.symbol)
return list(set(coordinated_species))
@abc.abstractmethod
def make_supercells_with_defects(self, scaling_matrix):
"""
Generate the supercell with input multipliers and create the defect.
First supercell has no defects.
To create unit cell with defect pass unit matrix.
"""
raise NotImplementedError()
class Vacancy(Defect):
"""
Subclass of Defect to generate vacancies and their analysis.
Args:
structure: pymatgen.core.structure.Structure
valences: valences of elements as a dictionary
radii: Radii of elements as a dictionary
"""
def __init__(self, structure, valences, radii):
self._structure = structure
self._valence_dict = valences
self._rad_dict = radii
# Store symmetrically distinct sites, their coordination numbers
# coordinated_sites, effective charge
symm_finder = SpacegroupAnalyzer(self._structure)
symm_structure = symm_finder.get_symmetrized_structure()
equiv_site_seq = symm_structure.equivalent_sites
self._defect_sites = []
self._defect_site_multiplicity = []
for equiv_sites in equiv_site_seq:
self._defect_sites.append(equiv_sites[0])
self._defect_site_multiplicity.append(len(equiv_sites))
self._vac_site_indices = []
for site in self._defect_sites:
for i in range(len(self._structure.sites)):
if site == self._structure[i]:
self._vac_site_indices.append(i)
coord_finder = VoronoiCoordFinder(self._structure)
self._defectsite_coord_no = []
self._defect_coord_sites = []
for i in self._vac_site_indices:
self._defectsite_coord_no.append(
coord_finder.get_coordination_number(i)
)
self._defect_coord_sites.append(
coord_finder.get_coordinated_sites(i)
)
# Store the ionic radii for the elements in the structure
# (Used to computing the surface are and volume)
# Computed based on valence of each element
self._vac_eff_charges = None
self._vol = None
self._sa = None
#@property
#def valence_dict(self):
# return self._valence_dict
def enumerate_defectsites(self):
"""
Returns symmetrically distinct vacancy sites
"""
return self._defect_sites
def get_defectsite_structure_indices(self):
"""
Returns indices of symmetrically distinct vacancy sites
"""
return self._vac_site_indices
def get_defectsite_structure_index(self, n):
"""
        Index of the vacancy site in the structure.sites list.
Args:
n:
Index of vacancy list
"""
return self._vac_site_indices[n]
def get_defectsite_effective_charge(self, n):
"""
Effective charge (In Kroger-Vink notation, cation vacancy has
effectively -ve charge and anion vacancy has +ve charge.)
Args:
n: Index of vacancy list
Returns:
            Effective charge of the defect site
"""
# Effective charge (In Kroger-Vink notation, cation vacancy has
# effectively -ve charge and anion vacancy has +ve charge.) Inverse
# the BVAnalyzer.get_valences result.
el = self.get_defectsite(n).species_string
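        # e.g. for an O2- site (valence -2) the vacancy carries an effective charge of +2.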
return -self._valence_dict[el]
#if not self._vac_eff_charges:
# self._vac_eff_charges = []
# for site in self.enumerate_defectsites():
# specie = site.specie.symbol
# self._vac_eff_charges.append(-self._valence_dict[specie])
#return self._vac_eff_charges[n]
def get_coordsites_min_max_charge(self, n):
"""
Minimum and maximum charge of sites surrounding the vacancy site.
Args:
n: Index of vacancy list
"""
bv = BVAnalyzer()
struct_valences = bv.get_valences(self._structure)
coordinated_site_valences = []
def _get_index(site):
for i in range(len(self._structure.sites)):
if site.is_periodic_image(self._structure.sites[i]):
return i
raise ValueError("Site not found")
for site in self._defect_coord_sites[n]:
ind = _get_index(site)
coordinated_site_valences.append(struct_valences[ind])
coordinated_site_valences.sort()
return coordinated_site_valences[0], coordinated_site_valences[-1]
# deprecated
def get_volume(self, n):
"""
Volume of the nth vacancy
Args:
n: Index of symmetrically distinct vacancies list
Returns:
floating number representing volume of vacancy
"""
if not self._vol:
self._vol = []
self._sa = []
um = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
sc = self.make_supercells_with_defects(um)[1:]
rad_dict = self.struct_radii
for i in range(len(sc)):
vol, sa = get_void_volume_surfarea(sc[i], rad_dict)
self._vol.append(vol)
self._sa.append(sa)
return self._vol[n]
# deprecated
def get_surface_area(self, n):
"""
Surface area of the nth vacancy
Args:
n: Index of symmetrically distinct vacancies list
Returns:
            floating number representing surface area of vacancy
"""
if not self._sa:
self._vol = []
self._sa = []
um = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
supercells = self.make_supercells_with_defects(um)[1:]
rad_dict = self.struct_radii
for sc in supercells:
vol, sa = get_void_volume_surfarea(sc, rad_dict)
self._vol.append(vol)
self._sa.append(sa)
return self._sa[n]
def _supercell_with_defect(self, scaling_matrix, defect_site):
sc = self._structure.copy()
sc.make_supercell(scaling_matrix)
oldf_coords = defect_site.frac_coords
coords = defect_site.lattice.get_cartesian_coords(oldf_coords)
newf_coords = sc.lattice.get_fractional_coords(coords)
sc_defect_site = PeriodicSite(defect_site.species_and_occu, newf_coords,
sc.lattice,
properties=defect_site.properties)
for i in range(len(sc.sites)):
#if sc_defect_site == sc.sites[i]:
if sc_defect_site.distance(sc.sites[i]) < 1e-3:
del sc[i]
return sc
raise ValueError('Something wrong if reached here')
def make_supercells_with_defects(self, scaling_matrix, species=None,
limit_return_structures=False):
"""
Generate sequence of supercells in pymatgen.core.structure.Structure
format, with each supercell containing one vacancy.
Args:
scaling_matrix: super cell scale parameters in matrix forms
species: Species in list format only for which vacancy supercells
are required. If not specified all the species are considered.
limit_return_structures: Boolean or positive number
If number, only that many structures are returned.
Returns:
Supercells with vacancies. First supercell has no defects.
"""
sc_with_vac = []
sc = self._structure.copy()
sc.make_supercell(scaling_matrix)
sc_with_vac.append(sc)
if not species:
species = sc.symbol_set
if not limit_return_structures:
limit_return_structures = self.defectsite_count()
for defect_site in self.enumerate_defectsites():
if len(sc_with_vac) <= limit_return_structures:
if isinstance(defect_site.specie,Specie):
site_specie = defect_site.specie.element.symbol
elif isinstance(defect_site.specie,Element):
site_specie = defect_site.specie.symbol
else:
raise TypeError("site specie is neither Specie nor Element")
if site_specie in species:
sc_with_vac.append(self._supercell_with_defect(
scaling_matrix, defect_site))
return sc_with_vac
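# Illustrative sketch (not part of the original module): hedged example of
# building one 2x2x2 vacancy supercell per symmetrically distinct site for a
# caller-supplied pymatgen Structure.
def _example_vacancy_supercells(structure):
    evaluator = ValenceIonicRadiusEvaluator(structure)
    vac = Vacancy(evaluator.structure, evaluator.valences, evaluator.radii)
    # The first returned supercell is defect-free; each of the rest is missing
    # one symmetrically distinct site.
    return vac.make_supercells_with_defects([[2, 0, 0], [0, 2, 0], [0, 0, 2]])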
class VacancyFormationEnergy(object):
"""
    Using GULP, compute the vacancy formation energy.
    Works only for binary metal oxides due to the use of Buckingham potentials.
"""
def __init__(self, vacancy):
self._vacancy = vacancy
self._energies = []
def get_energy(self, n, tol=0.5):
"""
Formation Energy for nth symmetrically distinct vacancy.
"""
#generate defect free structure energy
if not self._energies:
no_vac = self._vacancy.defectsite_count()
prev_energies = [0.0] * no_vac
tol_flg = [False] * no_vac
vac_gulp_kw = ('optimise', 'conp', 'qok')
val_dict = self._vacancy.struct_valences
for sp in range(2, 6):
if not (False in tol_flg):
#print sp
break
scale_mat = [[sp, 0, 0], [0, sp, 0], [0, 0, sp]]
sc = self._vacancy.make_supercells_with_defects(scale_mat)
blk_energy = get_energy_buckingham(sc[0])
no = len(sc[0].sites)
#print no
for i in range(1, no_vac + 1):
if not tol_flg[i - 1]:
vac_energy = get_energy_buckingham(
sc[i], keywords=vac_gulp_kw,
valence_dict=val_dict
)
form_energy = vac_energy - (no - 1) / no * blk_energy
if abs(form_energy - prev_energies[i - 1]) < tol:
tol_flg[i - 1] = True
prev_energies[i - 1] = form_energy
self._energies = prev_energies
self._tol_flg = tol_flg
if not self._tol_flg[n]:
print("Caution: tolerance not reached for {0} vacancy".format(n))
return self._energies[n]
class Interstitial(Defect):
"""
Subclass of Defect to generate interstitial sites
"""
def __init__(self, structure, valences, radii, site_type='voronoi_vertex',
accuracy='Normal', symmetry_flag=True, oxi_state = False):
"""
Given a structure, generate symmetrically distinct interstitial sites.
For a non-ionic structure, use oxi_state=True and give atomic radii.
Args:
structure: pymatgen.core.structure.Structure
valences: Dictionary of oxidation states of elements in
{el:valence} form
            radii: Radii of elements in the structure
site_type: "voronoi_vertex" uses voronoi nodes
"voronoi_edgecenter" uses voronoi polyhedra edge centers
"voronoi_facecenter" uses voronoi polyhedra face centers
"all" combines vertices, edgecenters and facecenters.
Default is "voronoi_vertex"
accuracy: Flag denoting whether to use high accuracy version
of Zeo++. Options are "Normal" and "High". Default is normal.
symmetry_flag: If True, only returns symmetrically distinct sites
oxi_state: If False, input structure is considered devoid of
oxidation-state decoration. And oxi-state for each site is
determined. Use True, if input structure is oxi-state
decorated. This option is useful when the structure is
not electro-neutral after deleting/adding sites. In that
case oxi-decorate the structure before deleting/adding the
sites.
"""
if not oxi_state:
self._structure = ValenceIonicRadiusEvaluator(structure).structure
else:
self._structure = structure
self._valence_dict = valences
self._rad_dict = radii
"""
Use Zeo++ to obtain the voronoi nodes. Apply symmetry reduction
and the symmetry reduced voronoi nodes are possible candidates
for interstitial sites.
"""
if accuracy == "Normal":
high_accuracy_flag = False
elif accuracy == "High":
high_accuracy_flag = True
else:
raise NotImplementedError("Accuracy setting not implemented.")
if accuracy == "High":
if site_type in ('voronoi_facecenter','voronoi_edgecenter','all'):
raise NotImplementedError(
"Site type not implemented for the accuracy setting")
vor_node_sites, vor_edgecenter_sites, vor_facecenter_sites = \
symmetry_reduced_voronoi_nodes(self._structure, self._rad_dict,
high_accuracy_flag, symmetry_flag)
if site_type == 'voronoi_vertex':
possible_interstitial_sites = vor_node_sites
elif site_type == 'voronoi_facecenter':
possible_interstitial_sites = vor_facecenter_sites
elif site_type == 'voronoi_edgecenter':
possible_interstitial_sites = vor_edgecenter_sites
elif site_type == "all":
possible_interstitial_sites = vor_node_sites + \
vor_facecenter_sites + vor_edgecenter_sites
else:
raise ValueError("Input site type not implemented")
        # Do further processing on possible_interstitial_sites to obtain
        # interstitial sites
self._defect_sites = possible_interstitial_sites
self._defectsite_coord_no = []
self._defect_coord_sites = []
self._defect_coord_charge = []
self._radii = []
for site in self._defect_sites:
coord_no, coord_sites, chrg = self._get_coord_no_sites_chrg(site)
self._defectsite_coord_no.append(coord_no)
self._defect_coord_sites.append(coord_sites)
self._defect_coord_charge.append(chrg)
for site in self._defect_sites:
vor_radius = site.properties.get('voronoi_radius',None)
if vor_radius:
vor_radius = float(vor_radius)
self._radii.append(vor_radius)
def _get_coord_no_sites_chrg(self, site):
"""
Compute the coordination number and coordination charge
Args:
site:
pymatgen.core.sites.Site
"""
struct = self._structure.copy()
struct.append(site.specie.symbol, site.frac_coords)
coord_finder = VoronoiCoordFinder(struct)
coord_no = coord_finder.get_coordination_number(-1)
coord_sites = coord_finder.get_coordinated_sites(-1)
# In some cases coordination sites to interstitials include
# interstitials also. Filtering them.
def no_inter(site):
return not site.specie.symbol == 'X'
coord_sites = filter(no_inter, coord_sites)
coord_chrg = 0
for site, weight in coord_finder.get_voronoi_polyhedra(-1).items():
if not site.specie.symbol == 'X':
coord_chrg += weight * self._valence_dict[site.species_string]
return coord_no, coord_sites, coord_chrg
def enumerate_defectsites(self):
"""
Enumerate all the symmetrically distinct interstitial sites.
The defect site has "X" as occupied specie.
"""
return self._defect_sites
def append_defectsite(self, site):
"""
Append a site to list of possible interstitials
Args:
site: pymatgen.core.sites.Site
"""
raise NotImplementedError()
def delete_defectsite(self, n):
"""
Remove a symmetrically distinct interstitial site
Args:
n: Index of interstitial site
"""
del self._defect_sites[n]
def get_coordsites_charge_sum(self, n):
"""
Total charge of the interstitial coordinated sites.
Args:
n: Index of interstitial list
"""
return self._defect_coord_charge[n]
def get_coordsites_min_max_charge(self, n):
"""
Minimum and maximum charge of sites surrounding the interstitial site.
Args:
n: Index of symmetrical distinct interstitial site
"""
coord_site_valences = []
for site in self._defect_coord_sites[n]:
coord_site_valences.append(self._valence_dict[site.specie.symbol])
coord_site_valences.sort()
return coord_site_valences[0], coord_site_valences[-1]
def get_radius(self, n):
"""
        Radius of the nth interstitial
        Args:
            n: Index of symmetrically distinct interstitials list
Returns:
floating number representing radius of interstitial sphere
"""
return self._radii[n]
def get_radii(self):
return self._radii
def reduce_defectsites(self):
"""
If multiple defect sites have same voronoi radius, only one is kept.
Useful if the symmetry based reduction of initial sites returned
from Zeo++ is not working properly due to deviation in ideal lattice
coordinates.
"""
distinct_radii = list(set(self._radii))
for rad in distinct_radii:
ind = self._radii.index(rad) # Index of first site with 'rad'
for i in reversed(list(range(ind + 1, len(self._radii)))):
# Backward search for remaining sites so index is not changed
if self._radii[i] == rad:
self._defect_sites.pop(i)
self._defectsite_coord_no.pop(i)
self._defect_coord_sites.pop(i)
self._radii.pop(i)
def radius_prune_defectsites(self, radius):
"""
Remove all the defect sites with voronoi radius less than input radius
"""
for i in reversed(list(range(len(self._radii)))):
if self._radii[i] < radius:
self._defect_sites.pop(i)
self._defectsite_coord_no.pop(i)
self._defect_coord_sites.pop(i)
self._radii.pop(i)
def prune_defectsites(self, el="C", oxi_state=4, dlta=0.1):
"""
        Prune all the defect sites which can't accommodate the input element
with the input oxidation state.
"""
rad = float(Specie(el, oxi_state).ionic_radius) - dlta
self.radius_prune_defectsites(rad)
def prune_close_defectsites(self, dist=0.2):
"""
Prune the sites that are very close.
"""
#print self.defectsite_count()
ind = 0
while ind < self.defectsite_count():
#i = ind + 1
#while i < self.defectsite_count():
i = self.defectsite_count()-1
#print ind, i
while i > ind:
d = self._defect_sites[ind].distance(self._defect_sites[i])
#print d, dist
if d < dist:
self._defect_sites.pop(i)
#self._defectsite_coord_no.pop(i)
#self._defect_coord_sites.pop(i)
#self._radii.pop(i)
# i += 1
i -= 1
ind += 1
#print self.defectsite_count()
def _supercell_with_defect(self, scaling_matrix, defect_site, element):
sc = self._structure.copy()
sc.make_supercell(scaling_matrix)
oldf_coords = defect_site.frac_coords
coords = defect_site.lattice.get_cartesian_coords(oldf_coords)
#print coords
newf_coords = sc.lattice.get_fractional_coords(coords)
for i in range(3):
coord = newf_coords[i]
if coord < 0:
while (coord < 0):
coord = coord+1
newf_coords[i] = coord
elif coord > 1:
while (coord > 1):
coord = coord-1
newf_coords[i] = coord
#print newf_coords
#sc_defect_site = PeriodicSite(element, newf_coords,
# sc.lattice)
try:
sc.append(element, newf_coords, coords_are_cartesian=False,
validate_proximity=True)
except ValueError:
sc = None
finally:
return sc
def make_supercells_with_defects(self, scaling_matrix, element):
"""
Returns sequence of supercells in pymatgen.core.structure.Structure
format, with each supercell containing an interstitial.
First supercell has no defects.
"""
sc_list_with_interstitial = []
sc = self._structure.copy()
sc.make_supercell(scaling_matrix)
sc_list_with_interstitial.append(sc)
for defect_site in self.enumerate_defectsites():
sc_with_inter = self._supercell_with_defect(
scaling_matrix, defect_site, element
)
if sc_with_inter:
sc_list_with_interstitial.append(sc_with_inter)
return sc_list_with_interstitial
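# Illustrative sketch (not part of the original module): hedged example of
# generating interstitial supercells for a hypothetical Li interstitial in a
# caller-supplied pymatgen Structure. It assumes a working Zeo++ installation.
def _example_interstitial_supercells(structure):
    evaluator = ValenceIonicRadiusEvaluator(structure)
    inter = Interstitial(evaluator.structure, evaluator.valences,
                         evaluator.radii, site_type='voronoi_vertex',
                         oxi_state=True)
    # Drop candidate sites that are too small to accommodate a Li+ ion before
    # building the supercells.
    inter.prune_defectsites(el="Li", oxi_state=1)
    return inter.make_supercells_with_defects(
        [[2, 0, 0], [0, 2, 0], [0, 0, 2]], "Li")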
class InterstitialAnalyzer(object):
"""
Use GULP to compute the interstitial formation energy, relaxed structures.
Works only for metal oxides due to the use of Buckingham Potentials.
Args:
inter: pymatgen.defects.point_defects.Interstitial
el: Element name in short hand notation ("El")
        oxi_state: Oxidation state
scd: Super cell dimension as number. The scaling is equal along xyz.
"""
def __init__(self, inter, el, oxi_state, scd=2):
self._inter = inter
self._el = el
self._oxi_state = oxi_state
self._scd = scd
self._relax_energies = []
self._norelax_energies = []
self._relax_struct = []
def get_energy(self, n, relax=True):
"""
Formation Energy for nth symmetrically distinct interstitial.
"""
if relax and not self._relax_energies:
self._relax_analysis()
if not relax and not self._norelax_energies:
no_inter = self._inter.defectsite_count()
inter_gulp_kw = ('qok',)
val_dict = self._inter.struct_valences
val_dict[self._el] = self._oxi_state # If element not in structure
scd = self._scd
scale_mat = [[scd, 0, 0], [0, scd, 0], [0, 0, scd]]
sc = self._inter.make_supercells_with_defects(scale_mat, self._el)
blk_energy = get_energy_buckingham(sc[0])
for i in range(1, no_inter + 1):
inter_energy = get_energy_buckingham(
sc[i], keywords=inter_gulp_kw, valence_dict=val_dict
)
form_energy = inter_energy - blk_energy
self._norelax_energies.append(form_energy)
if relax:
return self._relax_energies[n]
else:
return self._norelax_energies[n]
def _relax_analysis(self):
"""
Optimize interstitial structures
"""
no_inter = self._inter.defectsite_count()
inter_gulp_kw = ('optimise', 'conp', 'qok')
val_dict = self._inter.struct_valences
scd = self._scd
scale_mat = [[scd, 0, 0], [0, scd, 0], [0, 0, scd]]
sc = self._inter.make_supercells_with_defects(scale_mat, self._el)
blk_energy, rlx_struct = get_energy_relax_structure_buckingham(sc[0])
self._relax_struct.append(rlx_struct)
val_dict[self._el] = self._oxi_state # If element not in structure
for i in range(1, no_inter + 1):
energy, rlx_struct = get_energy_relax_structure_buckingham(
sc[i], keywords=inter_gulp_kw, valence_dict=val_dict
)
form_energy = energy - blk_energy
self._relax_energies.append(form_energy)
self._relax_struct.append(rlx_struct)
def get_relaxed_structure(self, n):
"""
Optimized interstitial structure
Args:
n: Symmetrically distinct interstitial index
.. note::
To get relaxed bulk structure pass -1.
-ve index will not work as expected.
"""
if not self._relax_struct:
self._relax_analysis()
return self._relax_struct[n + 1]
def get_percentage_volume_change(self, n):
"""
Volume change after the introduction of interstitial
Args:
n: Symmetrically distinct interstitial index
"""
if not self._relax_struct:
self._relax_analysis()
blk_struct = self._relax_struct[0]
def_struct = self._relax_struct[n + 1:n + 2][0]
del def_struct.sites[-1]
rv = RelaxationAnalyzer(blk_struct, def_struct)
return rv.get_percentage_volume_change()
def get_percentage_lattice_parameter_change(self, n):
"""
Lattice parameter change after the introduction of interstitial
Args:
n: Symmetrically distinct interstitial index
"""
if not self._relax_struct:
self._relax_analysis()
blk_struct = self._relax_struct[0]
def_struct = self._relax_struct[n + 1:n + 2][0]
del def_struct.sites[-1]
rv = RelaxationAnalyzer(blk_struct, def_struct)
return rv.get_percentage_lattice_parameter_changes()
def get_percentage_bond_distance_change(self, n):
"""
Bond distance change after the introduction of interstitial
Args:
n: Symmetrically distinct interstitial index
"""
if not self._relax_struct:
self._relax_analysis()
blk_struct = self._relax_struct[0]
def_struct = self._relax_struct[n + 1:n + 2][0]
del def_struct.sites[-1]
#print def_struct
rv = RelaxationAnalyzer(blk_struct, def_struct)
return rv.get_percentage_bond_dist_changes()
def relaxed_structure_match(self, i, j):
"""
Check if the relaxed structures of two interstitials match
Args:
i: Symmetrically distinct interstitial index
j: Symmetrically distinct interstitial index
.. note::
To use relaxed bulk structure pass -1.
-ve index will not work as expected
"""
if not self._relax_struct:
self._relax_analysis()
sm = StructureMatcher()
struct1 = self._relax_struct[i + 1]
struct2 = self._relax_struct[j + 1]
return sm.fit(struct1, struct2)
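# Illustrative sketch (not part of the original module): hedged example of the
# GULP-backed workflow above. It assumes a working GULP installation and a
# caller-supplied Interstitial object for a binary metal oxide.
def _example_interstitial_formation_energies(interstitial, el, oxi_state):
    analyzer = InterstitialAnalyzer(interstitial, el, oxi_state, scd=2)
    # Relaxed formation energy of every symmetrically distinct interstitial.
    return [analyzer.get_energy(i, relax=True)
            for i in range(interstitial.defectsite_count())]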
class StructureRelaxer(object):
def __init__(self, structure):
self._unrelax_struct = structure
self.relax()
def relax(self):
energy, rlx_struct = get_energy_relax_structure_buckingham(
self._unrelax_struct)
self._relax_struct = rlx_struct
def get_relaxed_structure(self):
return self._relax_struct
class InterstitialStructureRelaxer(object):
"""
Performs structural relaxation for each interstitial supercell.
Args:
interstitial: Unrelaxed interstitial
el: Species string in short notation
oxi_state: Oxidation state of the element
supercell_dim: Dimensions of super cell
"""
def __init__(self, interstitial, el, oxi_state, supercell_dim=2):
self._inter = interstitial
self._scd = supercell_dim
self._el = el
self._oxi_state = oxi_state
self._relax_structs = []
self._relax_energies = []
def relax(self):
"""
Optimize interstitial structures
"""
no_inter = self._inter.defectsite_count()
inter_gulp_kw = ('optimise', 'conp', 'qok')
val_dict = self._inter.struct_valences
scd = self._scd
scale_mat = [[scd, 0, 0], [0, scd, 0], [0, 0, scd]]
sc = self._inter.make_supercells_with_defects(scale_mat, self._el)
blk_energy, rlx_struct = get_energy_relax_structure_buckingham(sc[0])
self._relax_structs.append(rlx_struct)
self._relax_energies.append(blk_energy)
val_dict[self._el] = self._oxi_state # If element not in structure
for i in range(1, no_inter + 1):
try:
energy, rlx_struct = get_energy_relax_structure_buckingham(
sc[i], keywords=inter_gulp_kw, valence_dict=val_dict
)
self._relax_energies.append(energy)
self._relax_structs.append(rlx_struct)
except:
self._relax_energies.append(None)
self._relax_structs.append(None)
def is_empty(lst):
for value in lst:
if value:
return False
return True
if is_empty(self._relax_energies):
raise IOError('Relaxation failed')
def relaxed_structure_match(self, i, j):
"""
Check if the relaxed structures of two interstitials match
Args:
i: Symmetrically distinct interstitial index
j: Symmetrically distinct interstitial index
.. note::
Index 0 corresponds to bulk.
"""
if not self._relax_structs:
self.relax()
sm = StructureMatcher()
struct1 = self._relax_structs[i]
struct2 = self._relax_structs[j]
return sm.fit(struct1, struct2)
def relaxed_energy_match(self, i, j):
"""
Check if the relaxed energies of two interstitials match
Args:
i: Symmetrically distinct interstitial index
j: Symmetrically distinct interstitial index
.. note::
Index 0 corresponds to bulk.
"""
if not self._relax_energies:
self.relax()
energy1 = self._relax_energies[i]
energy2 = self._relax_energies[j]
return energy1 == energy2
def get_relaxed_structure(self, n):
"""
Get the relaxed structure of nth symmetrically distinct interstitial.
Args:
n: Symmetrically distinct interstitial index
.. note::
0 corresponds to relaxed bulk structure
"""
if not self._relax_structs:
self.relax()
return self._relax_structs[n]
def get_relaxed_energy(self, n):
"""
Get the relaxed structure of nth symmetrically distinct interstitial.
Args:
n: Symmetrically distinct interstitial index
.. note::
0 corresponds to relaxed bulk structure
"""
if not self._relax_energies:
self.relax()
return self._relax_energies[n]
def get_relaxed_interstitial(self):
"""
Get the relaxed structure of nth symmetrically distinct interstitial.
Args:
n: Symmetrically distinct interstitial index
"""
if not self._relax_energies:
self.relax()
energies = self._relax_energies[:]
structs = self._relax_structs[:]
distinct_energy_set = set(energies[1:]) # only interstitial energies
if None in distinct_energy_set:
distinct_energy_set.remove(None)
distinct_structs = [structs[0]] # bulk
distinct_energies = [energies[0]]
for energy in distinct_energy_set:
ind = energies.index(energy)
distinct_structs.append(structs[ind])
distinct_energies.append(energies[ind])
return RelaxedInterstitial(
distinct_structs, distinct_energies, self._inter.struct_valences
)
class RelaxedInterstitial(object):
"""
Stores the relaxed supercell structures for each interstitial
    Used to compute formation energies and the displacement of atoms near
    the interstitial.
Args:
struct_list: List of structures(supercells). The first structure should
represent relaxed bulk structure and the subsequent ones
interstitial structures (with the extra interstitial site
appended at the end).
energy_list: List of energies for relaxed interstitial structures.
The first energy should correspond to bulk structure
valence_dict: Valences of elements in dictionary form
"""
def __init__(self, struct_list, energy_list, valence_dict):
self._blk_struct = struct_list[0]
struct_list.pop(0)
self._structs = struct_list
self._blk_energy = energy_list[0]
energy_list.pop(0)
self._energies = energy_list
self._valence_dict = valence_dict
self._coord_no = []
self._coord_sites = []
self._coord_charge_no = []
def formation_energy(self, n, chem_pot=0):
"""
Compute the interstitial formation energy
Args:
n: Index of interstitials
chem_pot: Chemical potential of interstitial site element.
If not given, assumed as zero. The user is strongly
urged to supply the chemical potential value
"""
return self._energies[n] - self._blk_energy - chem_pot
def get_percentage_volume_change(self, n):
"""
Volume change after the introduction of interstitial
Args:
n: index of interstitials
"""
def_struct = self._structs[n:n + 1][0]
del def_struct.sites[-1]
rv = RelaxationAnalyzer(self._blk_struct, def_struct)
return rv.get_percentage_volume_change()
def get_percentage_lattice_parameter_change(self, n):
"""
Lattice parameter change after the introduction of interstitial
Args:
n: index of interstitials
"""
def_struct = self._structs[n:n + 1][0] # copy
del def_struct.sites[-1]
rv = RelaxationAnalyzer(self._blk_struct, def_struct)
return rv.get_percentage_lattice_parameter_changes()
def get_percentage_bond_distance_change(self, n):
"""
Bond distance change after the introduction of interstitial.
Args:
n: index of interstitials
"""
def_struct = self._structs[n:n + 1][0] # copy
del def_struct.sites[-1]
rv = RelaxationAnalyzer(self._blk_struct, def_struct)
return rv.get_percentage_bond_dist_changes()
def get_bulk_structure(self):
"""
Return relaxed bulk structure
"""
return self._blk_struct
def get_interstitial_structure(self, n):
"""
        Return the relaxed structure of the nth interstitial
"""
return self._structs[n]
def defect_count(self):
"""
Returns the number of distinct interstitials
"""
return len(self._structs)
def get_defectsite(self, n):
"""
Returns the defect site of nth interstitial.
Args:
n: Index of interstitial
"""
return self._structs[n][-1]
def get_coordination_number(self, n):
"""
Coordination number for nth interstitial.
Args:
n: Index of interstitials
"""
if not self._coord_no:
self._coord_find()
return self._coord_no[n]
def get_charge_coordination_number(self, n):
"""
Charge coordination number for nth interstitial.
Args:
n: Index of interstitials
"""
if not self._coord_charge_no:
self._coord_find()
return self._coord_charge_no[n]
def get_coordinated_sites(self, n):
"""
Coordinated sites for nth interstitial.
Args:
n: Index of interstitials
"""
if not self._coord_sites:
self._coord_find()
return self._coord_sites[n]
def get_coordinated_bulk_sites(self, n):
"""
Bulk sites corresponding to the coordinated sites for nth interstitial.
Args:
n: Index of interstitials
"""
blk_sites = []
for site in self.get_coordinated_sites(n):
site_index = self._structs[n].sites.index(site)
blk_sites.append(self._blk_struct[site_index])
return blk_sites
def get_coordinated_site_displacement(self, n):
"""
Compute the total displacement of coordinated sites from the
interstitial sites during the relaxation
Args:
n: Index of defect site
"""
coord_sites = self.get_coordinated_sites(n)
coord_blk_sites = self.get_coordinated_bulk_sites(n)
dist_sum = 0
for i in range(len(coord_sites)):
dist_sum += coord_sites[i].distance_from_point(coord_blk_sites[i])
# How to compute the average?
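        # (A mean displacement, if wanted, would simply be dist_sum / len(coord_sites).)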
return dist_sum
def _coord_find(self):
"""
calls VoronoiCoordFinder to compute the coordination number,
coordination charge
"""
for i in range(self.defect_count()):
struct = self._structs[i].copy()
coord_finder = VoronoiCoordFinder(struct)
self._coord_no.append(coord_finder.get_coordination_number(-1))
self._coord_sites.append(coord_finder.get_coordinated_sites(-1))
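            # The "charge coordination" below weights each coordinated site's
            # Voronoi contribution by the valence of its species from valence_dict.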
coord_chrg = 0
for site, weight in coord_finder.get_voronoi_polyhedra(-1).items():
coord_chrg += weight * self._valence_dict[site.species_string]
self._coord_charge_no.append(coord_chrg)
def symmetry_reduced_voronoi_nodes(
structure, rad_dict, high_accuracy_flag=False, symm_flag=True):
"""
Obtain symmetry reduced voronoi nodes using Zeo++ and
pymatgen.symmetry.finder.SpacegroupAnalyzer
Args:
        structure: pymatgen Structure object
        rad_dict: Dictionary containing radii of species in the structure
        high_accuracy_flag: Flag denoting whether to use the high accuracy version of Zeo++
symm_flag: Flag denoting whether to return symmetrically distinct sites only
Returns:
        Symmetrically distinct voronoi nodes as pymatgen Structure
"""
def add_closest_equiv_site(dist_sites, equiv_sites):
if not dist_sites:
dist_sites.append(equiv_sites[0])
else:
avg_dists = []
for site in equiv_sites:
dists = [site.distance(dst_site, jimage=[0, 0, 0])
for dst_site in dist_sites]
avg_dist = sum(dists) / len(dist_sites)
avg_dists.append(avg_dist)
min_avg_dist = min(avg_dists)
ind = avg_dists.index(min_avg_dist)
dist_sites.append(equiv_sites[ind])
def cmp_memoize_last_site(f): #Compares and stores last site
def not_duplicates(site1, site2):
if site1.distance(site2) < 1e-5:
return False
else:
return True
cmp_memoize_last_site.cache = None
def helper(x):
if not cmp_memoize_last_site.cache:
cmp_memoize_last_site.cache = f(x)
return True
y = f(x)
if not_duplicates(cmp_memoize_last_site.cache, y):
cmp_memoize_last_site.cache = y
return True
else:
return False
return helper
@cmp_memoize_last_site
def check_not_duplicates(site):
return site
if not symm_flag:
if not high_accuracy_flag:
vor_node_struct, vor_edgecenter_struct, vor_facecenter_struct = \
get_voronoi_nodes(structure, rad_dict)
return vor_node_struct.sites, vor_edgecenter_struct.sites, \
vor_facecenter_struct.sites
else:
# Only the nodes are from high accuracy voronoi decomposition
vor_node_struct = \
get_high_accuracy_voronoi_nodes(structure, rad_dict)
# Before getting the symmetry, remove the duplicates
vor_node_struct.sites.sort(key = lambda site: site.voronoi_radius)
#print type(vor_node_struct.sites[0])
            dist_sites = list(filter(check_not_duplicates, vor_node_struct.sites))
return dist_sites, None, None
if not high_accuracy_flag:
vor_node_struct, vor_edgecenter_struct, vor_facecenter_struct = \
get_voronoi_nodes(structure, rad_dict)
vor_node_symmetry_finder = SpacegroupAnalyzer(vor_node_struct, symprec=1e-1)
vor_node_symm_struct = vor_node_symmetry_finder.get_symmetrized_structure()
node_equiv_sites_list = vor_node_symm_struct.equivalent_sites
node_dist_sites = []
for equiv_sites in node_equiv_sites_list:
add_closest_equiv_site(node_dist_sites, equiv_sites)
vor_edge_symmetry_finder = SpacegroupAnalyzer(
vor_edgecenter_struct, symprec=1e-1)
vor_edge_symm_struct = vor_edge_symmetry_finder.get_symmetrized_structure()
edgecenter_equiv_sites_list = vor_edge_symm_struct.equivalent_sites
edgecenter_dist_sites = []
for equiv_sites in edgecenter_equiv_sites_list:
add_closest_equiv_site(edgecenter_dist_sites, equiv_sites)
if not edgecenter_equiv_sites_list: # Fix this so doesn't arise
edgecenter_dist_sites = vor_edgecenter_struct.sites
vor_fc_symmetry_finder = SpacegroupAnalyzer(
vor_facecenter_struct, symprec=1e-1)
vor_fc_symm_struct = vor_fc_symmetry_finder.get_symmetrized_structure()
facecenter_equiv_sites_list = vor_fc_symm_struct.equivalent_sites
facecenter_dist_sites = []
for equiv_sites in facecenter_equiv_sites_list:
add_closest_equiv_site(facecenter_dist_sites, equiv_sites)
if not facecenter_equiv_sites_list: # Fix this so doesn't arise
facecenter_dist_sites = vor_facecenter_struct.sites
return node_dist_sites, edgecenter_dist_sites, facecenter_dist_sites
else:
# Only the nodes are from high accuracy voronoi decomposition
vor_node_struct = \
get_high_accuracy_voronoi_nodes(structure, rad_dict)
# Before getting the symmetry, remove the duplicates
vor_node_struct.sites.sort(key = lambda site: site.voronoi_radius)
#print type(vor_node_struct.sites[0])
dist_sites = list(filter(check_not_duplicates, vor_node_struct.sites))
# Ignore symmetry from ha voronoi nodes
# Increase the symmetry precision to 0.25
#spg = SpacegroupAnalyzer(structure,symprec=1e-1).get_spacegroup()
# Remove symmetrically equivalent sites
#i = 0
#while (i < len(dist_sites)-1):
# sites1 = [dist_sites[i]]
# sites2 = [dist_sites[i+1]]
# if spg.are_symmetrically_equivalent(sites1,sites2):
# del dist_sites[i+1]
# else:
# i = i+1
node_dist_sites = dist_sites
return (node_dist_sites, None, None)
#vor_edge_symmetry_finder = SpacegroupAnalyzer(
# vor_edgecenter_struct, symprec=1e-1)
#vor_edge_symm_struct = vor_edge_symmetry_finder.get_symmetrized_structure()
#edgecenter_equiv_sites_list = vor_edge_symm_struct.equivalent_sites
#edgecenter_dist_sites = []
#for equiv_sites in edgecenter_equiv_sites_list:
# add_closest_equiv_site(edgecenter_dist_sites, equiv_sites)
#if not edgecenter_equiv_sites_list:
# edgecenter_dist_sites = vor_edgecenter_struct.sites
#vor_fc_symmetry_finder = SpacegroupAnalyzer(
# vor_facecenter_struct, symprec=1e-1)
#vor_fc_symm_struct = vor_fc_symmetry_finder.get_symmetrized_structure()
#facecenter_equiv_sites_list = vor_fc_symm_struct.equivalent_sites
#facecenter_dist_sites = []
#for equiv_sites in facecenter_equiv_sites_list:
# add_closest_equiv_site(facecenter_dist_sites, equiv_sites)
#if not facecenter_equiv_sites_list:
# facecenter_dist_sites = vor_facecenter_struct.sites
#return node_dist_sites, edgecenter_dist_sites, facecenter_dist_sites
| 35.464382 | 88 | 0.609312 |
4a271b3a9c7719d7dfc2e1584e001a855e6b03e4 | 1,074 | py | Python | setup.py | filwaline/drf-typed-views | b92c81e8174e1be07f689ab494e4453fcf9cb915 | [
"MIT"
] | null | null | null | setup.py | filwaline/drf-typed-views | b92c81e8174e1be07f689ab494e4453fcf9cb915 | [
"MIT"
] | null | null | null | setup.py | filwaline/drf-typed-views | b92c81e8174e1be07f689ab494e4453fcf9cb915 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from codecs import open
from setuptools import find_packages, setup
def readme():
with open("README.md", "r") as infile:
return infile.read()
classifiers = [
# Pick your license as you wish (should match "license" above)
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
]
setup(
name="drf-typed-views",
version="0.2.1",
description="Use type annotations for automatic request validation in Django REST Framework",
author="Robert Singer",
author_email="[email protected]",
packages=find_packages(exclude=["test_project*"]),
url="https://github.com/rsinger86/drf-typed-views",
license="MIT",
keywords="django rest type annotations automatic validation validate",
long_description=readme(),
classifiers=classifiers,
long_description_content_type="text/markdown",
)
| 30.685714 | 97 | 0.684358 |
4a271d6e249562bb660fd077645ddff858b77b2c | 15,250 | py | Python | cryptoapis/model/get_wallet_transaction_details_by_transaction_ide403.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 5 | 2021-05-17T04:45:03.000Z | 2022-03-23T12:51:46.000Z | cryptoapis/model/get_wallet_transaction_details_by_transaction_ide403.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | null | null | null | cryptoapis/model/get_wallet_transaction_details_by_transaction_ide403.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 2 | 2021-06-02T07:32:26.000Z | 2022-02-12T02:36:23.000Z | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.banned_ip_address import BannedIpAddress
from cryptoapis.model.banned_ip_address_details import BannedIpAddressDetails
from cryptoapis.model.endpoint_not_allowed_for_api_key import EndpointNotAllowedForApiKey
from cryptoapis.model.endpoint_not_allowed_for_plan import EndpointNotAllowedForPlan
from cryptoapis.model.feature_mainnets_not_allowed_for_plan import FeatureMainnetsNotAllowedForPlan
globals()['BannedIpAddress'] = BannedIpAddress
globals()['BannedIpAddressDetails'] = BannedIpAddressDetails
globals()['EndpointNotAllowedForApiKey'] = EndpointNotAllowedForApiKey
globals()['EndpointNotAllowedForPlan'] = EndpointNotAllowedForPlan
globals()['FeatureMainnetsNotAllowedForPlan'] = FeatureMainnetsNotAllowedForPlan
class GetWalletTransactionDetailsByTransactionIDE403(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'details': ([BannedIpAddressDetails],), # noqa: E501
'code': (str,), # noqa: E501
'message': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'details': 'details', # noqa: E501
'code': 'code', # noqa: E501
'message': 'message', # noqa: E501
}
read_only_vars = {
}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""GetWalletTransactionDetailsByTransactionIDE403 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
details ([BannedIpAddressDetails]): [optional] # noqa: E501
code (str): Specifies an error code, e.g. error 404.. [optional] # noqa: E501
message (str): Specifies the message of the error, i.e. why the error was returned, e.g. error 404 stands for “not found”.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""GetWalletTransactionDetailsByTransactionIDE403 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
details ([BannedIpAddressDetails]): [optional] # noqa: E501
code (str): Specifies an error code, e.g. error 404.. [optional] # noqa: E501
message (str): Specifies the message of the error, i.e. why the error was returned, e.g. error 404 stands for “not found”.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
],
'oneOf': [
BannedIpAddress,
EndpointNotAllowedForApiKey,
EndpointNotAllowedForPlan,
FeatureMainnetsNotAllowedForPlan,
],
}
| 45.933735 | 484 | 0.604787 |
4a271dce77e6c8643ba4d78e0ca6ad5c2c466758 | 2,638 | py | Python | data/p4VQE/R4/benchmark/startQiskit646.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startQiskit646.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startQiskit646.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=3
# total number=15
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
prog.h(input_qubit[0]) # number=12
prog.cz(input_qubit[1],input_qubit[0]) # number=13
prog.h(input_qubit[0]) # number=14
prog.cx(input_qubit[1],input_qubit[0]) # number=9
prog.h(input_qubit[3]) # number=10
prog.x(input_qubit[2]) # number=11
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =3962
writefile = open("../data/startQiskit646.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 28.06383 | 118 | 0.636088 |
4a271e21046824b1662e0a1db4c1bd4bec69d84c | 9,140 | py | Python | AIXOR/Utils/gi_composites.py | scando1993/MLPXORGate | 0a1eefe32941f45be3c28b40d4f0637b23446fca | [
"BSD-3-Clause"
] | null | null | null | AIXOR/Utils/gi_composites.py | scando1993/MLPXORGate | 0a1eefe32941f45be3c28b40d4f0637b23446fca | [
"BSD-3-Clause"
] | null | null | null | AIXOR/Utils/gi_composites.py | scando1993/MLPXORGate | 0a1eefe32941f45be3c28b40d4f0637b23446fca | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright (C) 2015 Dustin Spicuzza <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
from os.path import abspath, join
import inspect
import warnings
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
__all__ = ['GtkTemplate']
class GtkTemplateWarning(UserWarning):
pass
def _connect_func(builder, obj, signal_name, handler_name,
connect_object, flags, cls):
'''Handles GtkBuilder signal connect events'''
if connect_object is None:
extra = ()
else:
extra = (connect_object,)
# The handler name refers to an attribute on the template instance,
# so ask GtkBuilder for the template instance
template_inst = builder.get_object(cls.__gtype_name__)
if template_inst is None: # This should never happen
errmsg = "Internal error: cannot find template instance! obj: %s; " \
"signal: %s; handler: %s; connect_obj: %s; class: %s" % \
(obj, signal_name, handler_name, connect_object, cls)
warnings.warn(errmsg, GtkTemplateWarning)
return
handler = getattr(template_inst, handler_name)
if flags == GObject.ConnectFlags.AFTER:
obj.connect_after(signal_name, handler, *extra)
else:
obj.connect(signal_name, handler, *extra)
template_inst.__connected_template_signals__.add(handler_name)
def _register_template(cls, template_bytes):
'''Registers the template for the widget and hooks init_template'''
# This implementation won't work if there are nested templates, but
# we can't do that anyways due to PyGObject limitations so it's ok
if not hasattr(cls, 'set_template'):
raise TypeError("Requires PyGObject 3.13.2 or greater")
cls.set_template(template_bytes)
bound_methods = set()
bound_widgets = set()
# Walk the class, find marked callbacks and child attributes
for name in dir(cls):
o = getattr(cls, name, None)
if inspect.ismethod(o):
if hasattr(o, '_gtk_callback'):
bound_methods.add(name)
# Don't need to call this, as connect_func always gets called
#cls.bind_template_callback_full(name, o)
elif isinstance(o, _Child):
cls.bind_template_child_full(name, True, 0)
bound_widgets.add(name)
# Have to setup a special connect function to connect at template init
# because the methods are not bound yet
cls.set_connect_func(_connect_func, cls)
cls.__gtemplate_methods__ = bound_methods
cls.__gtemplate_widgets__ = bound_widgets
base_init_template = cls.init_template
cls.init_template = lambda s: _init_template(s, cls, base_init_template)
def _init_template(self, cls, base_init_template):
'''This would be better as an override for Gtk.Widget'''
# TODO: could disallow using a metaclass.. but this is good enough
# .. if you disagree, feel free to fix it and issue a PR :)
if self.__class__ is not cls:
raise TypeError("Inheritance from classes with @GtkTemplate decorators "
"is not allowed at this time")
connected_signals = set()
self.__connected_template_signals__ = connected_signals
base_init_template(self)
for name in self.__gtemplate_widgets__:
widget = self.get_template_child(cls, name)
self.__dict__[name] = widget
if widget is None:
# Bug: if you bind a template child, and one of them was
# not present, then the whole template is broken (and
# it's not currently possible for us to know which
# one is broken either -- but the stderr should show
# something useful with a Gtk-CRITICAL message)
raise AttributeError("A missing child widget was set using "
"GtkTemplate.Child and the entire "
"template is now broken (widgets: %s)" %
', '.join(self.__gtemplate_widgets__))
for name in self.__gtemplate_methods__.difference(connected_signals):
errmsg = ("Signal '%s' was declared with @GtkTemplate.Callback " +
"but was not present in template") % name
warnings.warn(errmsg, GtkTemplateWarning)
# TODO: Make it easier for IDE to introspect this
class _Child(object):
'''
Assign this to an attribute in your class definition and it will
be replaced with a widget defined in the UI file when init_template
is called
'''
__slots__ = []
@staticmethod
def widgets(count):
'''
Allows declaring multiple widgets with less typing::
                button, \
                label1, \
                label2 = GtkTemplate.Child.widgets(3)
'''
return [_Child() for _ in range(count)]
class _GtkTemplate(object):
'''
Use this class decorator to signify that a class is a composite
widget which will receive widgets and connect to signals as
defined in a UI template. You must call init_template to
cause the widgets/signals to be initialized from the template::
@GtkTemplate(ui='foo.ui')
class Foo(Gtk.Box):
def __init__(self):
super(Foo, self).__init__()
self.init_template()
The 'ui' parameter can either be a file path or a GResource resource
path::
@GtkTemplate(ui='/org/example/foo.ui')
class Foo(Gtk.Box):
pass
To connect a signal to a method on your instance, do::
@GtkTemplate.Callback
def on_thing_happened(self, widget):
pass
To create a child attribute that is retrieved from your template,
add this to your class definition::
@GtkTemplate(ui='foo.ui')
class Foo(Gtk.Box):
widget = GtkTemplate.Child()
Note: This is implemented as a class decorator, but if it were
included with PyGI I suspect it might be better to do this
in the GObject metaclass (or similar) so that init_template
can be called automatically instead of forcing the user to do it.
.. note:: Due to limitations in PyGObject, you may not inherit from
python objects that use the GtkTemplate decorator.
'''
__ui_path__ = None
@staticmethod
def Callback(f):
'''
Decorator that designates a method to be attached to a signal from
the template
'''
f._gtk_callback = True
return f
Child = _Child
@staticmethod
def set_ui_path(*path):
'''
If using file paths instead of resources, call this *before*
loading anything that uses GtkTemplate, or it will fail to load
your template file
:param path: one or more path elements, will be joined together
to create the final path
TODO: Alternatively, could wait until first class instantiation
before registering templates? Would need a metaclass...
'''
_GtkTemplate.__ui_path__ = abspath(join(*path))
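        # Example (hypothetical layout): GtkTemplate.set_ui_path('data', 'ui')
        # makes a later @GtkTemplate(ui='foo.ui') resolve to <project>/data/ui/foo.ui.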
def __init__(self, ui):
self.ui = ui
def __call__(self, cls):
if not issubclass(cls, Gtk.Widget):
raise TypeError("Can only use @GtkTemplate on Widgets")
# Nested templates don't work
if hasattr(cls, '__gtemplate_methods__'):
raise TypeError("Cannot nest template classes")
# Load the template either from a resource path or a file
# - Prefer the resource path first
try:
template_bytes = Gio.resources_lookup_data(self.ui, Gio.ResourceLookupFlags.NONE)
except GLib.GError:
ui = self.ui
if isinstance(ui, (list, tuple)):
ui = join(ui)
if _GtkTemplate.__ui_path__ is not None:
ui = join(_GtkTemplate.__ui_path__, ui)
with open(ui, 'rb') as fp:
template_bytes = GLib.Bytes.new(fp.read())
_register_template(cls, template_bytes)
return cls
# Future shim support if this makes it into PyGI?
#if hasattr(Gtk, 'GtkTemplate'):
# GtkTemplate = lambda c: c
#else:
GtkTemplate = _GtkTemplate
| 35.843137 | 93 | 0.640919 |
4a271e5183ffc876497d94e993e5670e61942531 | 2,508 | py | Python | waf/playground/eclipse/java/junit.py | yankee14/reflow-oven-atmega328p | e6792143576f13f0a3a49edfd54dbb2ef851d95a | [
"Apache-2.0"
] | 24 | 2019-08-13T02:39:01.000Z | 2022-03-03T15:44:54.000Z | waf/playground/eclipse/java/junit.py | yankee14/reflow-oven-atmega328p | e6792143576f13f0a3a49edfd54dbb2ef851d95a | [
"Apache-2.0"
] | 4 | 2020-11-16T02:03:09.000Z | 2021-08-19T08:16:48.000Z | waf/playground/eclipse/java/junit.py | yankee14/reflow-oven-atmega328p | e6792143576f13f0a3a49edfd54dbb2ef851d95a | [
"Apache-2.0"
] | 11 | 2016-07-01T02:21:06.000Z | 2020-11-23T08:29:22.000Z | #! /usr/bin/env python
# encoding: utf-8
"""
JUnit test system
- executes all junit tests in the specified subtree (junitsrc)
- only if --junit is given on the commandline
- method:
- add task to compile junitsrc after compiling srcdir
- additional junit_classpath specifiable
- defaults to classpath + destdir
- add task to run junit tests after they're compiled.
"""
import os
from waflib import Task, TaskGen, Utils, Options
from waflib.TaskGen import feature, before, after
from waflib.Configure import conf
JUNIT_RUNNER = 'org.junit.runner.JUnitCore'
def options(opt):
opt.add_option('--junit', action='store_true', default=False,
help='Run all junit tests', dest='junit')
opt.add_option('--junitpath', action='store', default='',
help='Give a path to the junit jar')
def configure(ctx):
cp = ctx.options.junitpath
val = ctx.env.JUNIT_RUNNER = ctx.env.JUNIT_RUNNER or JUNIT_RUNNER
if ctx.check_java_class(val, with_classpath=cp):
ctx.fatal('Could not run junit from %r' % val)
ctx.env.CLASSPATH_JUNIT = cp
#@feature('junit')
#@after('apply_java', 'use_javac_files')
def make_test(self):
"""make the unit test task"""
if not getattr(self, 'junitsrc', None):
return
junit_task = self.create_task('junit_test')
try:
junit_task.set_run_after(self.javac_task)
except AttributeError:
pass
feature('junit')(make_test)
after('apply_java', 'use_javac_files')(make_test)
class junit_test(Task.Task):
color = 'YELLOW'
vars = ['JUNIT_EXEC_FLAGS', 'JUNIT_RUNNER']
def runnable_status(self):
"""
Only run if --junit was set as an option
"""
for t in self.run_after:
if not t.hasrun:
return Task.ASK_LATER
n = self.generator.path.find_dir(self.generator.junitsrc)
if not n:
self.generator.bld.fatal('no such junit directory %r' % self.generator.junitsrc)
self.base = n
# make sure the tests are executed whenever the .class files change
self.inputs = n.ant_glob('**/*.java')
ret = super(junit_test, self).runnable_status()
if ret == Task.SKIP_ME:
if getattr(Options.options, 'junit', False):
ret = Task.RUN_ME
return ret
def run(self):
cmd = []
cmd.extend(self.env.JAVA)
cmd.append('-classpath')
cmd.append(self.generator.javac_task.env.CLASSPATH + os.pathsep + self.generator.javac_task.env.OUTDIR)
cmd.extend(self.env.JUNIT_EXEC_FLAGS)
cmd.append(self.env.JUNIT_RUNNER)
cmd.extend([x.path_from(self.base).replace('.java', '').replace(os.sep, '.') for x in self.inputs])
return self.exec_command(cmd)
| 29.162791 | 105 | 0.719298 |
4a271e7c6c6aadae4b6f5d576d8acb5caccc7fc5 | 5,534 | py | Python | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_09_01/aio/operations/_resolve_private_link_service_id_operations.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_09_01/aio/operations/_resolve_private_link_service_id_operations.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_09_01/aio/operations/_resolve_private_link_service_id_operations.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ResolvePrivateLinkServiceIdOperations:
"""ResolvePrivateLinkServiceIdOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2020_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def post(
self,
resource_group_name: str,
resource_name: str,
parameters: "_models.PrivateLinkResource",
**kwargs
) -> "_models.PrivateLinkResource":
"""Gets the private link service ID for the specified managed cluster.
Gets the private link service ID the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param parameters: Parameters (name, groupId) supplied in order to resolve a private link
service ID.
:type parameters: ~azure.mgmt.containerservice.v2020_09_01.models.PrivateLinkResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResource, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_09_01.models.PrivateLinkResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.post.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PrivateLinkResource')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
post.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resolvePrivateLinkServiceId'} # type: ignore
| 49.855856 | 209 | 0.692808 |
4a27205b1746b71ee1ee3316b7b20643e490963a | 1,516 | py | Python | cocoa/inter/result_table.py | astrofrog/dupeguru | d0a3f081dab21ea3d2fc69830c9e71a18078c150 | [
"BSD-3-Clause"
] | 1 | 2017-01-03T05:50:39.000Z | 2017-01-03T05:50:39.000Z | cocoa/inter/result_table.py | astrofrog/dupeguru | d0a3f081dab21ea3d2fc69830c9e71a18078c150 | [
"BSD-3-Clause"
] | null | null | null | cocoa/inter/result_table.py | astrofrog/dupeguru | d0a3f081dab21ea3d2fc69830c9e71a18078c150 | [
"BSD-3-Clause"
] | null | null | null | from objp.util import dontwrap
from cocoa.inter import PyTable, TableView
class ResultTableView(TableView):
def invalidateMarkings(self): pass
class PyResultTable(PyTable):
def powerMarkerMode(self) -> bool:
return self.model.power_marker
def setPowerMarkerMode_(self, value: bool):
self.model.power_marker = value
def deltaValuesMode(self) -> bool:
return self.model.delta_values
def setDeltaValuesMode_(self, value: bool):
self.model.delta_values = value
def valueForRow_column_(self, row_index: int, column: str) -> object:
return self.model.get_row_value(row_index, column)
def isDeltaAtRow_column_(self, row_index: int, column: str) -> bool:
row = self.model[row_index]
return row.is_cell_delta(column)
def renameSelected_(self, newname: str) -> bool:
return self.model.rename_selected(newname)
def sortBy_ascending_(self, key: str, asc: bool):
self.model.sort(key, asc)
def markSelected(self):
self.model.app.toggle_selected_mark_state()
def removeSelected(self):
self.model.app.remove_selected()
def selectedDupeCount(self) -> int:
return self.model.selected_dupe_count
def pathAtIndex_(self, index: int) -> str:
row = self.model[index]
return str(row._dupe.path)
# python --> cocoa
@dontwrap
def invalidate_markings(self):
self.callback.invalidateMarkings()
| 30.32 | 73 | 0.664908 |
4a2720dfdcbfd8bae0de0b51af5f51592acec55c | 11,515 | py | Python | src/maintenance/azext_maintenance/tests/latest/example_steps.py | wwendyc/azure-cli-extensions | 6b4099676bb5d43fdb57bc69f9c0281cca510a0a | [
"MIT"
] | 1 | 2021-08-03T18:32:54.000Z | 2021-08-03T18:32:54.000Z | src/maintenance/azext_maintenance/tests/latest/example_steps.py | wwendyc/azure-cli-extensions | 6b4099676bb5d43fdb57bc69f9c0281cca510a0a | [
"MIT"
] | 4 | 2020-09-07T12:56:24.000Z | 2021-02-04T12:19:20.000Z | src/maintenance/azext_maintenance/tests/latest/example_steps.py | wwendyc/azure-cli-extensions | 6b4099676bb5d43fdb57bc69f9c0281cca510a0a | [
"MIT"
] | 5 | 2020-09-08T22:46:48.000Z | 2020-11-08T14:54:35.000Z | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .. import try_manual
# EXAMPLE: /ApplyUpdateForResourceGroup/get/ApplyUpdatesResourceGroup_List
@try_manual
def step_applyupdate_for_resource_group_list(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance applyupdate-for-resource-group list '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /ApplyUpdates/put/ApplyUpdates_CreateOrUpdate
@try_manual
def step_applyupdate_create(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance applyupdate create '
'--provider-name "Microsoft.Compute" '
'--resource-group "{rg}" '
'--resource-name "smdtest1" '
'--resource-type "virtualMachineScaleSets"',
checks=checks)
# EXAMPLE: /ApplyUpdates/put/ApplyUpdates_CreateOrUpdateParent
@try_manual
def step_applyupdate_create2(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance applyupdate create '
'--provider-name "Microsoft.Compute" '
'--resource-group "{rg}" '
'--resource-name "smdvm1" '
'--resource-parent-name "smdtest1" '
'--resource-parent-type "virtualMachineScaleSets" '
'--resource-type "virtualMachines"',
checks=checks)
# EXAMPLE: /ApplyUpdates/get/ApplyUpdates_Get
@try_manual
def step_applyupdate_show(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance applyupdate show '
'--name "{myApplyUpdate}" '
'--provider-name "Microsoft.Compute" '
'--resource-group "{rg}" '
'--resource-name "smdtest1" '
'--resource-type "virtualMachineScaleSets"',
checks=checks)
# EXAMPLE: /ApplyUpdates/get/ApplyUpdates_GetParent
@try_manual
def step_applyupdate_show_parent(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance applyupdate show-parent '
'--name "{myApplyUpdate}" '
'--provider-name "Microsoft.Compute" '
'--resource-group "{rg}" '
'--resource-name "smdvm1" '
'--resource-parent-name "smdtest1" '
'--resource-parent-type "virtualMachineScaleSets" '
'--resource-type "virtualMachines"',
checks=checks)
# EXAMPLE: /ApplyUpdates/get/ApplyUpdates_List
@try_manual
def step_applyupdate_list(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance applyupdate list',
checks=checks)
# EXAMPLE: /MaintenanceConfigurations/put/MaintenanceConfigurations_CreateOrUpdateForResource
@try_manual
def step_configuration_create(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance configuration create '
'--location "westus2" '
'--maintenance-scope "Host" '
'--maintenance-window-duration "05:00" '
'--maintenance-window-expiration-date-time "9999-12-31 00:00" '
'--maintenance-window-recur-every "Day" '
'--maintenance-window-start-date-time "2025-04-30 08:00" '
'--maintenance-window-time-zone "Pacific Standard Time" '
'--namespace "Microsoft.Maintenance" '
'--visibility "Custom" '
'--resource-group "{rg}" '
'--resource-name "{myMaintenanceConfiguration2}"',
checks=checks)
# EXAMPLE: /MaintenanceConfigurations/get/MaintenanceConfigurations_GetForResource
@try_manual
def step_configuration_show(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance configuration show '
'--resource-group "{rg}" '
'--resource-name "{myMaintenanceConfiguration2}"',
checks=checks)
# EXAMPLE: /MaintenanceConfigurations/get/MaintenanceConfigurations_List
@try_manual
def step_configuration_list(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance configuration list',
checks=checks)
# EXAMPLE: /MaintenanceConfigurations/patch/MaintenanceConfigurations_UpdateForResource
@try_manual
def step_configuration_update(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance configuration update '
'--location "westus2" '
'--maintenance-scope "Host" '
'--maintenance-window-duration "05:00" '
'--maintenance-window-expiration-date-time "9999-12-31 00:00" '
'--maintenance-window-recur-every "Month Third Sunday" '
'--maintenance-window-start-date-time "2025-04-30 08:00" '
'--maintenance-window-time-zone "Pacific Standard Time" '
'--namespace "Microsoft.Maintenance" '
'--visibility "Custom" '
'--resource-group "{rg}" '
'--resource-name "{myMaintenanceConfiguration2}"',
checks=checks)
# EXAMPLE: /ConfigurationAssignments/put/ConfigurationAssignments_CreateOrUpdate
@try_manual
def step_assignment_create(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance assignment create '
'--maintenance-configuration-id "/subscriptions/{subscription_id}/resourcegroups/{rg}/providers/Microsoft.'
'Maintenance/maintenanceConfigurations/{myMaintenanceConfiguration2}" '
'--name "{myConfigurationAssignment2}" '
'--provider-name "Microsoft.Compute" '
'--resource-group "{rg}" '
'--resource-name "smdtest1" '
'--resource-type "virtualMachineScaleSets"',
checks=checks)
# EXAMPLE: /ConfigurationAssignments/put/ConfigurationAssignments_CreateOrUpdateParent
@try_manual
def step_assignment_create2(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance assignment create '
'--maintenance-configuration-id "/subscriptions/{subscription_id}/resourcegroups/{rg}/providers/Microsoft.'
'Maintenance/maintenanceConfigurations/{myMaintenanceConfiguration}" '
'--name "{myConfigurationAssignment}" '
'--provider-name "Microsoft.Compute" '
'--resource-group "{rg}" '
'--resource-name "smdvm1" '
'--resource-parent-name "smdtest1" '
'--resource-parent-type "virtualMachineScaleSets" '
'--resource-type "virtualMachines"',
checks=checks)
# EXAMPLE: /ConfigurationAssignments/get/ConfigurationAssignments_List
@try_manual
def step_assignment_list(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance assignment list '
'--provider-name "Microsoft.Compute" '
'--resource-group "{rg}" '
'--resource-name "smdtest1" '
'--resource-type "virtualMachineScaleSets"',
checks=checks)
# EXAMPLE: /ConfigurationAssignments/get/ConfigurationAssignments_ListParent
@try_manual
def step_assignment_list_parent(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance assignment list-parent '
'--provider-name "Microsoft.Compute" '
'--resource-group "{rg}" '
'--resource-name "smdtestvm1" '
'--resource-parent-name "smdtest1" '
'--resource-parent-type "virtualMachineScaleSets" '
'--resource-type "virtualMachines"',
checks=checks)
# EXAMPLE: /ConfigurationAssignments/delete/ConfigurationAssignments_Delete
@try_manual
def step_assignment_delete(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance assignment delete -y '
'--name "{myConfigurationAssignment2}" '
'--provider-name "Microsoft.Compute" '
'--resource-group "{rg}" '
'--resource-name "smdtest1" '
'--resource-type "virtualMachineScaleSets"',
checks=checks)
# EXAMPLE: /ConfigurationAssignments/delete/ConfigurationAssignments_DeleteParent
@try_manual
def step_assignment_delete2(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance assignment delete -y '
'--name "{myConfigurationAssignment2}" '
'--provider-name "Microsoft.Compute" '
'--resource-group "{rg}" '
'--resource-name "smdvm1" '
'--resource-parent-name "smdtest1" '
'--resource-parent-type "virtualMachineScaleSets" '
'--resource-type "virtualMachines"',
checks=checks)
# EXAMPLE: /MaintenanceConfigurations/delete/MaintenanceConfigurations_DeleteForResource
@try_manual
def step_configuration_delete(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance configuration delete -y '
'--resource-group "{rg}" '
'--resource-name "example1"',
checks=checks)
# EXAMPLE: /MaintenanceConfigurationsForResourceGroup/get/MaintenanceConfigurationsResourceGroup_List
@try_manual
def step_configuration_for_resource_group_list(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance configuration-for-resource-group list '
'--resource-group "{rg}"',
checks=checks)
# EXAMPLE: /PublicMaintenanceConfigurations/get/PublicMaintenanceConfigurations_GetForResource
@try_manual
def step_public_configuration_show(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance public-configuration show '
'--resource-name "{myMaintenanceConfiguration2}"',
checks=checks)
# EXAMPLE: /PublicMaintenanceConfigurations/get/PublicMaintenanceConfigurations_List
@try_manual
def step_public_configuration_list(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance public-configuration list',
checks=checks)
# EXAMPLE: /Updates/get/Updates_List
@try_manual
def step_update_list(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance update list '
'--provider-name "Microsoft.Compute" '
'--resource-group "{rg}" '
'--resource-name "smdtest1" '
'--resource-type "virtualMachineScaleSets"',
checks=checks)
# EXAMPLE: /Updates/get/Updates_ListParent
@try_manual
def step_update_list_parent(test, rg, checks=None):
if checks is None:
checks = []
test.cmd('az maintenance update list-parent '
'--provider-name "Microsoft.Compute" '
'--resource-group "{rg}" '
'--resource-name "1" '
'--resource-parent-name "smdtest1" '
'--resource-parent-type "virtualMachineScaleSets" '
'--resource-type "virtualMachines"',
checks=checks)
| 37.025723 | 120 | 0.627703 |
4a2720e777ff4812df9a78c3992e57734dc3abc5 | 2,062 | py | Python | egs2/zh_openslr38/asr1/local/check_train_test_duplicate.py | cuichenx/espnet | dcd4aed1d806e5d0360276a9518119d0376d68da | [
"Apache-2.0"
] | null | null | null | egs2/zh_openslr38/asr1/local/check_train_test_duplicate.py | cuichenx/espnet | dcd4aed1d806e5d0360276a9518119d0376d68da | [
"Apache-2.0"
] | null | null | null | egs2/zh_openslr38/asr1/local/check_train_test_duplicate.py | cuichenx/espnet | dcd4aed1d806e5d0360276a9518119d0376d68da | [
"Apache-2.0"
] | null | null | null | from collections import Counter
train_file = "data/train/text"
train_lines = []
with open(train_file) as f:
for line in f:
if not line:
continue
train_lines.append(line.split()[1])
train_lines = set(train_lines)
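# A set makes the per-utterance duplicate check below an O(1) membership test.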
for test_name in ("test", "dev"):
test_file = f"data/{test_name}/text"
test_lines = []
test_uttids = []
with open(test_file) as f:
for line in f:
if not line:
continue
test_uttids.append(line.split()[0])
test_lines.append(line.split()[1])
count = 0
duplicate_uttids = [] # duplicate ids in the test file
for t, uttid in zip(test_lines, test_uttids):
if t in train_lines:
duplicate_uttids.append(uttid)
count += 1
duplicate_uttids = set(duplicate_uttids)
print(count, "duplicates in", test_name)
# if input("continue? [y/n]") == 'y':
# remove all instances of duplicate uttids in these files: spk2utt, text, utt2spk, wav.scp
with open(f"data/{test_name}/spk2utt", "r") as f:
# replace all uttid with empty string
text = f.read()
for uttid in duplicate_uttids:
text = text.replace(" " + uttid, "")
for line in text.split("\n"):
if not line:
continue
if len(line.strip().split(" ")) < 2:
print(f"removing {line} from spk2utt")
text = text.replace(line + "\n", "")
with open(f"data/{test_name}/spk2utt", "w") as f:
f.write(text)
for name in ("text", "utt2spk", "wav.scp"):
with open(f"data/{test_name}/{name}", "r") as f:
# remove all lines that contain ids that correspond to duplicate sentences
out_lines = []
for line in f:
if not line.split()[0] in duplicate_uttids:
out_lines.append(line.strip())
with open(f"data/{test_name}/{name}", "w") as f:
f.write("\n".join(out_lines))
f.write("\n")
# else:
# print("ok.")
| 33.258065 | 94 | 0.555286 |
4a27211df49d9b4219d03e5a74d1c102167f32e1 | 71 | py | Python | pyairvisual/const.py | bachya/pyairvisual | 13910a6e21db97243b6c34b09ee8fbd13a62afed | [
"MIT"
] | 6 | 2019-04-04T10:48:12.000Z | 2022-02-01T20:00:53.000Z | pyairvisual/const.py | bachya/pyairvisual | 13910a6e21db97243b6c34b09ee8fbd13a62afed | [
"MIT"
] | 76 | 2018-06-11T21:27:05.000Z | 2022-03-01T18:14:36.000Z | pyairvisual/const.py | bachya/pyairvisual | 13910a6e21db97243b6c34b09ee8fbd13a62afed | [
"MIT"
] | 2 | 2019-07-27T20:51:02.000Z | 2019-11-04T23:31:47.000Z | """Define package-wide constants."""
DEFAULT_REQUEST_TIMEOUT: int = 10
| 23.666667 | 36 | 0.760563 |
4a2721b5a7b25b1d375fa3e8b4a2c6d0e0513116 | 11,018 | py | Python | plotly/graph_objs/heatmapgl/hoverlabel/__init__.py | piyush1301/plotly.py | 50cd5c4cd4732042422751c7760acbab8dd8a50d | [
"MIT"
] | 6 | 2019-05-03T02:12:04.000Z | 2020-03-01T06:33:21.000Z | plotly/graph_objs/heatmapgl/hoverlabel/__init__.py | piyush1301/plotly.py | 50cd5c4cd4732042422751c7760acbab8dd8a50d | [
"MIT"
] | null | null | null | plotly/graph_objs/heatmapgl/hoverlabel/__init__.py | piyush1301/plotly.py | 50cd5c4cd4732042422751c7760acbab8dd8a50d | [
"MIT"
] | 5 | 2019-05-18T16:50:11.000Z | 2021-07-06T21:14:36.000Z |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['colorsrc']
@colorsrc.setter
def colorsrc(self, val):
self['colorsrc'] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self['family']
@family.setter
def family(self, val):
self['family'] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['familysrc']
@familysrc.setter
def familysrc(self, val):
self['familysrc'] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self['size']
@size.setter
def size(self, val):
self['size'] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['sizesrc']
@sizesrc.setter
def sizesrc(self, val):
self['sizesrc'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'heatmapgl.hoverlabel'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly.graph_objs.heatmapgl.hoverlabel.Font
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Font
"""
super(Font, self).__init__('font')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.heatmapgl.hoverlabel.Font
constructor must be a dict or
an instance of plotly.graph_objs.heatmapgl.hoverlabel.Font"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.heatmapgl.hoverlabel import (font as v_font)
# Initialize validators
# ---------------------
self._validators['color'] = v_font.ColorValidator()
self._validators['colorsrc'] = v_font.ColorsrcValidator()
self._validators['family'] = v_font.FamilyValidator()
self._validators['familysrc'] = v_font.FamilysrcValidator()
self._validators['size'] = v_font.SizeValidator()
self._validators['sizesrc'] = v_font.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('color', None)
self['color'] = color if color is not None else _v
_v = arg.pop('colorsrc', None)
self['colorsrc'] = colorsrc if colorsrc is not None else _v
_v = arg.pop('family', None)
self['family'] = family if family is not None else _v
_v = arg.pop('familysrc', None)
self['familysrc'] = familysrc if familysrc is not None else _v
_v = arg.pop('size', None)
self['size'] = size if size is not None else _v
_v = arg.pop('sizesrc', None)
self['sizesrc'] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
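# A minimal construction sketch (assumed usage, not part of the generated code
# above): in practice this hover-label font is usually set through the
# graph_objs API rather than by instantiating Font directly, e.g.
#
#     import plotly.graph_objs as go
#     fig = go.Figure(go.Heatmapgl(
#         z=[[1, 2], [3, 4]],
#         hoverlabel=dict(font=dict(family="Arial", size=14)),
#     ))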
| 34.111455 | 82 | 0.563351 |
4a2723bc3123cbb0043d0213f84b7c8510d4efd0 | 14,565 | py | Python | examples/data_augmentation/reinforcement/main.py | bhaskar2443053/forte | 95fabd94126d45c0db07cdcc197049ed1859d228 | [
"Apache-2.0"
] | null | null | null | examples/data_augmentation/reinforcement/main.py | bhaskar2443053/forte | 95fabd94126d45c0db07cdcc197049ed1859d228 | [
"Apache-2.0"
] | null | null | null | examples/data_augmentation/reinforcement/main.py | bhaskar2443053/forte | 95fabd94126d45c0db07cdcc197049ed1859d228 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example of building a reinforcement-learning-based, data-augmentation-enhanced
sentence classifier on top of a pre-trained BERT model.
"""
import argparse
import functools
import logging
import os
import torch
import torch.nn.functional as F
import texar.torch as tx
from transformers import BertForMaskedLM
from config import config_data, config_classifier
from utils import model_utils
from forte.models.da_rl.aug_wrapper import MetaAugmentationWrapper
from forte.models.da_rl import TexarBertMetaModule
parser = argparse.ArgumentParser()
parser.add_argument(
"--pretrained-model-name",
type=str,
default="bert-base-uncased",
choices=tx.modules.BERTEncoder.available_checkpoints(),
help="Name of the pre-trained downstream checkpoint to load.",
)
parser.add_argument(
"--output-dir",
default="output/",
help="The output directory where the model checkpoints will be written.",
)
parser.add_argument(
"--do-train", action="store_true", help="Whether to run training."
)
parser.add_argument(
"--do-eval", action="store_true", help="Whether to run eval on the dev set."
)
parser.add_argument(
"--do-test",
action="store_true",
help="Whether to run test on the test set.",
)
parser.add_argument(
"--augmentation-model-name",
type=str,
default="bert-base-uncased",
choices=tx.modules.BERTEncoder.available_checkpoints(),
help="Name of the pre-trained augmentation model checkpoint to load.",
)
parser.add_argument(
"--num-aug",
type=int,
default=4,
help="number of augmentation samples when fine-tuning aug model",
)
parser.add_argument(
"--classifier-pretrain-epoch",
type=int,
default=10,
help="number of epochs to pretrain the classifier",
)
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logging.root.setLevel(logging.INFO)
class RLAugmentClassifierTrainer:
def __init__(self):
self._prepare_data_iterator()
self._init_aug_model()
self._init_classifier()
def _prepare_data_iterator(self):
tx.utils.maybe_create_dir(args.output_dir)
# Loads data
num_train_data = config_data.num_train_data
self.num_train_steps = int(
num_train_data
/ config_data.train_batch_size
* config_data.max_train_epoch
)
train_dataset = tx.data.RecordData(
hparams=config_data.train_hparam, device=device
)
val_dataset = tx.data.RecordData(
hparams=config_data.eval_hparam, device=device
)
test_dataset = tx.data.RecordData(
hparams=config_data.test_hparam, device=device
)
self.iterator = tx.data.DataIterator(
{"train": train_dataset, "dev": val_dataset, "test": test_dataset}
)
self.val_data_iterator = tx.data.DataIterator({"dev": val_dataset})
self.val_data_iterator.switch_to_dataset("dev")
def _init_aug_model(self):
# pylint: disable=protected-access
# Builds data augmentation BERT
aug_model = BertForMaskedLM.from_pretrained(
args.augmentation_model_name
)
aug_model.to(device)
aug_tokenizer = tx.data.BERTTokenizer(
pretrained_model_name=args.augmentation_model_name
)
input_mask_ids = aug_tokenizer._map_token_to_id("[MASK]")
# Builds augmentation optimizer
aug_lr = 4e-5
param_optimizer = list(aug_model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in param_optimizer
if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.01,
},
{
"params": [
p
for n, p in param_optimizer
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
aug_optim = tx.core.BertAdam(
optimizer_grouped_parameters,
betas=(0.9, 0.999),
eps=1e-6,
lr=aug_lr,
)
# Builds data augmentation wrapper
self.aug_wrapper = MetaAugmentationWrapper(
aug_model, aug_optim, input_mask_ids, device, args.num_aug
)
def _init_classifier(self):
# Builds BERT for classification task.
config_downstream = {
k: v
for k, v in config_classifier.__dict__.items()
if not k.startswith("__") and k != "hyperparams"
}
self.classifier = tx.modules.BERTClassifier(
pretrained_model_name=args.pretrained_model_name,
hparams=config_downstream,
)
self.classifier.to(device)
# Builds learning rate decay scheduler
classifier_lr = 4e-5
vars_with_decay = []
vars_without_decay = []
for name, param in self.classifier.named_parameters():
if "layer_norm" in name or name.endswith("bias"):
vars_without_decay.append(param)
else:
vars_with_decay.append(param)
opt_params = [
{
"params": vars_with_decay,
"weight_decay": 0.01,
},
{
"params": vars_without_decay,
"weight_decay": 0.0,
},
]
self.optim = tx.core.BertAdam(
opt_params, betas=(0.9, 0.999), eps=1e-6, lr=classifier_lr
)
num_warmup_steps = int(
self.num_train_steps * config_data.warmup_proportion
)
self.scheduler = torch.optim.lr_scheduler.LambdaLR(
self.optim,
functools.partial(
model_utils.get_lr_multiplier,
total_steps=self.num_train_steps,
warmup_steps=num_warmup_steps,
),
)
def pre_train_classifier_epoch(self):
r"""Pre-trains model on the training set
for better weight initialization.
"""
self.iterator.switch_to_dataset("train")
self.classifier.train()
for _ in range(args.classifier_pretrain_epoch):
for batch in self.iterator:
self.optim.zero_grad()
input_ids = batch["input_ids"]
segment_ids = batch["segment_ids"]
labels = batch["label_ids"]
input_length = (1 - (input_ids == 0).int()).sum(dim=1)
logits, _ = self.classifier(
input_ids, input_length, segment_ids
)
loss = self._compute_loss(logits, labels)
loss.backward()
self.optim.step()
self.scheduler.step()
def train_epoch(self):
r"""Trains on the training set, and evaluates on the validation set
periodically.
"""
self.iterator.switch_to_dataset("train")
self.classifier.train()
self.optim.zero_grad()
for batch in self.iterator:
input_ids = batch["input_ids"]
input_mask = batch["input_mask"]
segment_ids = batch["segment_ids"]
labels = batch["label_ids"]
# Train augmentation model params phi.
self.aug_wrapper.reset_model()
# Iterate over training instances.
num_instances = len(input_ids)
for i in range(num_instances):
features = (
input_ids[i],
input_mask[i],
segment_ids[i],
labels[i],
)
# Augmented instance with params phi exposed
(
aug_probs,
input_mask_aug,
segment_ids_aug,
label_ids_aug,
) = self.aug_wrapper.augment_instance(features)
# Compute classifier loss.
self.classifier.zero_grad()
input_length_aug = ((input_mask_aug == 1).int()).sum(dim=1)
logits, _ = self.classifier(
aug_probs, input_length_aug, segment_ids_aug
)
loss = self._compute_loss(logits, label_ids_aug)
# Update classifier params on meta_model.
meta_model = TexarBertMetaModule(self.classifier)
meta_model = self.aug_wrapper.update_meta_model(
meta_model, loss, self.classifier, self.optim
)
# Compute grads of aug_model on validation data.
for val_batch in self.val_data_iterator: # one batch
val_input_ids = val_batch["input_ids"]
val_segment_ids = val_batch["segment_ids"]
val_labels = val_batch["label_ids"]
val_input_length = (1 - (val_input_ids == 0).int()).sum(
dim=1
)
val_logits, _ = meta_model(
val_input_ids, val_input_length, val_segment_ids
)
val_loss = self._compute_loss(val_logits, val_labels)
val_loss = (
val_loss
/ num_instances
/ args.num_aug
/ len(self.val_data_iterator)
)
val_loss.backward()
# Update aug_model param phi.
self.aug_wrapper.update_phi()
# Train classifier with augmented batch
(
input_probs,
input_masks,
segment_ids,
label_ids,
) = self.aug_wrapper.augment_batch(
(input_ids, input_mask, segment_ids, labels)
)
input_length = ((input_masks == 1).int()).sum(dim=1)
self.optim.zero_grad()
logits, _ = self.classifier(input_probs, input_length, segment_ids)
loss = self._compute_loss(logits, label_ids)
loss.backward()
self.optim.step()
self.scheduler.step()
self._display_logging(loss)
@torch.no_grad()
def eval_epoch(self):
"""Evaluates on the dev set."""
self.iterator.switch_to_dataset("dev")
self.classifier.eval()
nsamples = 0
avg_rec = tx.utils.AverageRecorder()
for batch in self.iterator:
input_ids = batch["input_ids"]
segment_ids = batch["segment_ids"]
labels = batch["label_ids"]
input_length = (1 - (input_ids == 0).int()).sum(dim=1)
logits, preds = self.classifier(
input_ids, input_length, segment_ids
)
loss = self._compute_loss(logits, labels)
accu = tx.evals.accuracy(labels, preds)
batch_size = input_ids.size()[0]
avg_rec.add([accu, loss], batch_size)
nsamples += batch_size
logging.info(
"eval accu: %.4f; loss: %.4f; nsamples: %d",
avg_rec.avg(0),
avg_rec.avg(1),
nsamples,
)
@torch.no_grad()
def test_epoch(self, test_file):
"""Does predictions on the test set."""
self.iterator.switch_to_dataset("test")
self.classifier.eval()
_all_preds = []
nsamples = 0
avg_rec = tx.utils.AverageRecorder()
for batch in self.iterator:
input_ids = batch["input_ids"]
segment_ids = batch["segment_ids"]
labels = batch["label_ids"]
input_length = (1 - (input_ids == 0).int()).sum(dim=1)
logits, preds = self.classifier(
input_ids, input_length, segment_ids
)
loss = self._compute_loss(logits, labels)
accu = tx.evals.accuracy(labels, preds)
batch_size = input_ids.size()[0]
avg_rec.add([accu, loss], batch_size)
nsamples += batch_size
_all_preds.extend(preds.tolist())
logging.info(
"test accu: %.4f; loss: %.4f; nsamples: %d",
avg_rec.avg(0),
avg_rec.avg(1),
nsamples,
)
output_file = os.path.join(args.output_dir, test_file)
with open(output_file, "w+") as writer:
writer.write("\n".join(str(p) for p in _all_preds))
logging.info("test output written to %s", output_file)
def _compute_loss(self, logits, labels):
r"""Compute loss."""
if self.classifier.is_binary:
loss = F.binary_cross_entropy(
logits.view(-1), labels.view(-1), reduction="mean"
)
else:
loss = F.cross_entropy(
logits.view(-1, self.classifier.num_classes),
labels.view(-1),
reduction="mean",
)
return loss
def _display_logging(self, loss):
step = self.scheduler.last_epoch
dis_steps = config_data.display_steps
if dis_steps > 0 and step % dis_steps == 0:
logging.info("step: %d; loss: %f", step, loss)
eval_steps = config_data.eval_steps
if eval_steps > 0 and step % eval_steps == 0:
self._eval_epoch()
self.classifier.train()
def main():
trainer = RLAugmentClassifierTrainer()
trainer.pre_train_classifier_epoch()
if args.do_train:
for k in range(config_data.max_train_epoch):
logging.info("training epoch %d", k)
trainer.train_epoch()
if args.do_eval:
trainer.eval_epoch()
if args.do_test:
trainer.test_epoch("test_results.tsv")
if __name__ == "__main__":
main()
| 33.715278 | 80 | 0.571438 |
4a27243f54ae06b9335053167b023b69346992a4 | 2,769 | py | Python | bindings/python/ensmallen/datasets/networkrepository/sw1000030d2trial2.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/networkrepository/sw1000030d2trial2.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/networkrepository/sw1000030d2trial2.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph SW-10000-3-0d2-trial2.
The graph is automatically retrieved from the NetworkRepository repository.
References
---------------------
Please cite the following if you use the data:
```bib
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def Sw1000030d2Trial2(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/networkrepository",
version: str = "latest",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the SW-10000-3-0d2-trial2 graph.
The graph is automatically retrieved from the NetworkRepository repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "latest"
The version of the graph to retrieve.
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the SW-10000-3-0d2-trial2 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="Sw1000030d2Trial2",
repository="networkrepository",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 29.147368 | 94 | 0.657999 |
4a2724b63fa05198d9dfeb56eafad6be62b6cb40 | 2,206 | py | Python | kerfuffle!.py | nasioutz/HashNet | e52286ac7eeb4c680c9631cc34b2ccd1291f1cf7 | [
"MIT"
] | null | null | null | kerfuffle!.py | nasioutz/HashNet | e52286ac7eeb4c680c9631cc34b2ccd1291f1cf7 | [
"MIT"
] | null | null | null | kerfuffle!.py | nasioutz/HashNet | e52286ac7eeb4c680c9631cc34b2ccd1291f1cf7 | [
"MIT"
] | null | null | null | import train
import test
from os.path import join
import json
# ------ TRAIN ------
args_train = train.Arguments(net='AlexNet', hash_bit=48, dual_batch=False, scale_tanh=False, tanh_step=200,
#snapshot=("cross-entropy", '2018_12_25_4_19_4', 'iter_07500'),
pretrained=True, alternative_alexnet=False, alt_version=2,
workers=8, batch_size=128, resize_size=256, crop_size=224, num_iterations=2000,
lr=0.005, lr_step=200, lr_decay_factor=0.1,
loss_type="cauchy-cross-entropy", gamma=20, q_lambda=0.0, class_num=56)
config_train = train.produce_config(args_train)
#print()
#print(config_train["loss"])
config_train["out_file"].write(json.dumps(config_train["network"]["params"])+"\n")
config_train["out_file"].write(json.dumps(config_train["loss"])+"\n")
config_train["out_file"].write(json.dumps(config_train["optimizer"])+"\n")
config_train["out_file"].write(json.dumps(config_train["prep"])+"\n")
config_train["out_file"].write(json.dumps(config_train["num_iterations"])+"\n")
config_train["out_file"].write(json.dumps(config_train["snapshot_interval"])+"\n")
config_train["out_file"].write(json.dumps(config_train["hash_bit"])+"\n")
train.train(config_train)
# ------ TEST ------
args_test = test.Arguments(snapshot=(args_train.loss_type, args_train.prefix, 'iter_01500'),
portion_oo_10=5, batch_size=16, crop10=False,
resize_size=args_train.resize_size, crop_size=args_train.crop_size,
hash_bit=args_train.hash_bit
)
config_test = test.produce_config(args_test)
code_and_label = test.predict(config_test)
mAP = test.mean_average_precision(code_and_label, config_test["R"])
print(config_test["snapshot_path"])
print(args_test.snapshot[0] + ": " + args_test.snapshot[1] + " | MAP: " + str(mAP))
print("saving ...")
test.save_code_and_label(code_and_label, join(config_test["output_path"], args_test.snapshot[0]))
config_test["out_file"].write(args_test.snapshot[0] + ": " + args_test.snapshot[1] + " | MAP: " + str(mAP))
print("saving done")
| 43.254902 | 108 | 0.664098 |
4a2725569e99ae0f013a3a8c55a6b2d7e4bd85b6 | 432 | py | Python | ai/admin.py | CzechInvest/ciis | c6102598f564a717472e5e31e7eb894bba2c8104 | [
"MIT"
] | 1 | 2019-05-26T22:24:01.000Z | 2019-05-26T22:24:01.000Z | ai/admin.py | CzechInvest/ciis | c6102598f564a717472e5e31e7eb894bba2c8104 | [
"MIT"
] | 6 | 2019-01-22T14:53:43.000Z | 2020-09-22T16:20:28.000Z | ai/admin.py | CzechInvest/ciis | c6102598f564a717472e5e31e7eb894bba2c8104 | [
"MIT"
] | null | null | null | from django.contrib import admin
from leaflet.admin import LeafletGeoAdmin, LeafletGeoAdminMixin
from cigeo.admin import ArealFieldAdmin
from .models import *
class AiAdmin(ArealFieldAdmin, LeafletGeoAdmin):
change_list_template = "admin/change_list-map.html"
raw_id_fields = ("address",)
default_zoom = 7
default_lon = 1730000
default_lat = 6430000
admin.site.register(Form)
admin.site.register(Ai, AiAdmin)
| 27 | 63 | 0.780093 |
4a27268ac7c024f98f55d0d6addfe11e3c4b2dd4 | 1,864 | py | Python | NC/lagrange_interplotation.py | nmanumr/comsats-scripts | ec7a38c705315170f689f26ce6f6c56bbd87d923 | [
"MIT"
] | 4 | 2021-07-04T16:43:29.000Z | 2021-12-23T16:10:50.000Z | NC/lagrange_interplotation.py | nmanumr/comsats-scripts | ec7a38c705315170f689f26ce6f6c56bbd87d923 | [
"MIT"
] | null | null | null | NC/lagrange_interplotation.py | nmanumr/comsats-scripts | ec7a38c705315170f689f26ce6f6c56bbd87d923 | [
"MIT"
] | null | null | null | from functools import reduce
import operator
from utils import r
def lagrange(x_data, y_data, x, n=1):
out_str = f"$$ f_{{{n}}}(x) = "
if n is None:
n = min(len(x_data), 4)
i = [i for i, e in enumerate(x_data[:-1]) if x_data[i] < x and x < x_data[i+1]][0]
nearest = list(sum(zip(range(i, -1, -1), range(i+1, len(x_data))), ()))[:n+1]
nearest_x = list(map(lambda i: x_data[i], nearest))
nearest_y = list(map(lambda i: y_data[i], nearest))
print(nearest_x, nearest_y)
terms = []
for j in range(0, n+1):
s = "\\frac{"
s += ''.join([f'(x - x_{i})' for i in range(0, n+1) if i != j])
s += "}{"
s += ''.join([f'(x_{j} - x_{i})' for i in range(0, n+1) if i != j])
s += f"}}f(x_{j})"
terms.append(s)
out_str += " + ".join(terms)
out_str += f" $$\n\n$$ f_{{{n}}}({x}) = "
terms = []
terms2 = []
val = 0
for j in range(0, n+1):
s = "\\frac{"
s += ''.join([f'({r(x)} - {r(nearest_x[i])})' for i in range(0, n+1) if i != j])
s += "}{"
s += ''.join([f'({r(nearest_x[j])} - {r(nearest_x[i])})' for i in range(0, n+1) if i != j])
s += f"}} ({r(nearest_y[j])})"
terms.append(s)
u = reduce(operator.mul, [x - nearest_x[i] for i in range(0, n+1) if i != j], 1)
b = reduce(operator.mul, [nearest_x[j] - nearest_x[i] for i in range(0, n+1) if i != j], 1.0)
terms2.append(f'\\frac{{{r(u)}}}{{{r(b)}}}({r(nearest_y[j])})')
val += u/b * nearest_y[j]
out_str += " + ".join(terms)
out_str += " $$\n\n"
out_str += f"$$ f_{{{n}}}({x}) = {' + '.join(terms2)} $$\n\n"
out_str += f"$$ f_{{{n}}}({x}) = {r(val)} $$"
return out_str
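# Quick sanity check of the underlying formula (hypothetical values, not taken
# from the script below): for the points (0, 1) and (1, 3) with x = 0.5 and
# n = 1,
#     f_1(0.5) = (0.5 - 1)/(0 - 1) * 1 + (0.5 - 0)/(1 - 0) * 3 = 0.5 + 1.5 = 2.0,
# i.e. plain linear interpolation between the two bracketing points.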
if __name__ == '__main__':
print(lagrange([0, 1, 2, 4], [1, 1, 5, 6], 1.5, n=3))
| 31.59322 | 101 | 0.457082 |
4a2727458a549e7f7f3fe2b6241f481aad03acf2 | 4,609 | py | Python | asciimatics/scene.py | peppy0510/asciimatics | 660f2bf9578e78ea2c7bee0c73a3c5277e8afa5f | [
"Apache-2.0"
] | null | null | null | asciimatics/scene.py | peppy0510/asciimatics | 660f2bf9578e78ea2c7bee0c73a3c5277e8afa5f | [
"Apache-2.0"
] | null | null | null | asciimatics/scene.py | peppy0510/asciimatics | 660f2bf9578e78ea2c7bee0c73a3c5277e8afa5f | [
"Apache-2.0"
] | null | null | null | """
This module defines Scene objects for animation purposes. For more details, see
http://asciimatics.readthedocs.io/en/latest/animation.html
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
class Scene(object):
"""
Class to store the details of a single scene to be displayed. This is
made up of a set of :py:obj:`.Effect` objects. See the documentation for
Effect to understand the interaction between the two classes and
http://asciimatics.readthedocs.io/en/latest/animation.html for how to use them together.
"""
def __init__(self, effects, duration=0, clear=True, name=None):
"""
:param effects: The list of effects to apply to this scene.
:param duration: The number of frames in this Scene. A value of 0 means that the Scene
should query the Effects to find the duration. A value of -1 means don't stop.
:param clear: Whether to clear the Screen at the start of the Scene.
:param name: Optional name to identify the scene.
"""
self._effects = []
for effect in effects:
self.add_effect(effect, reset=False)
self._duration = duration
if duration == 0:
self._duration = max([x.stop_frame for x in effects])
self._clear = clear
self._name = name
def reset(self, old_scene=None, screen=None):
"""
Reset the scene ready for playing.
:param old_scene: The previous version of this Scene that was running before the
application reset - e.g. due to a screen resize.
:param screen: New screen to use if old_scene is not None.
"""
# Always reset all the effects.
for effect in self._effects:
effect.reset()
# If we have an old Scene to recreate, get the data out of that and
# apply it where possible by cloning objects where appropriate.
if old_scene:
for old_effect in old_scene.effects:
# Using the "easier to ask forgiveness..." mantra, just try
# cloning everything and ignore any AttributeErrors.
try:
old_effect.clone(screen, self)
except AttributeError:
pass
def exit(self):
"""
Handle any tidy up required on the exit of the Scene.
"""
# Save off any persistent state for each effect.
for effect in self._effects:
if hasattr(effect, "save"):
effect.save()
def add_effect(self, effect, reset=True):
"""
Add an effect to the Scene.
This method can be called at any time - even when playing the Scene. The default logic
assumes that the Effect needs to be reset before being displayed. This can be overridden
using the `reset` parameter.
:param effect: The Effect to be added.
:param reset: Whether to reset the Effect that has just been added.
"""
# Reset the effect in case this is in the middle of a Scene.
if reset:
effect.reset()
effect.register_scene(self)
self._effects.append(effect)
def remove_effect(self, effect):
"""
Remove an effect from the scene.
:param effect: The effect to remove.
"""
self._effects.remove(effect)
def process_event(self, event):
"""
Process a new input event.
This method will pass the event on to any Effects in reverse Z order so that the
top-most Effect has priority.
:param event: The Event that has been triggered.
:returns: None if the Scene processed the event, else the original event.
"""
for effect in reversed(self._effects):
event = effect.process_event(event)
if event is None:
break
return event
@property
def name(self):
"""
:return: The name of this Scene. May be None.
"""
return self._name
@property
def effects(self):
"""
:return: The list of Effects in this Scene.
"""
return self._effects
@property
def duration(self):
"""
:return: The length of the scene in frames.
"""
return self._duration
@property
def clear(self):
"""
:return: Whether the Scene should clear at the start.
"""
return self._clear
| 33.158273 | 97 | 0.610979 |
4a27289856333d22a494b3f41143e6cd8bf1f0e5 | 1,151 | py | Python | TBS/b2c/api/views.py | v1ct0r5u3n/TBS | 459cd7c971f6aab7bd5f4f8d1e0788b90f34e772 | [
"Apache-2.0"
] | null | null | null | TBS/b2c/api/views.py | v1ct0r5u3n/TBS | 459cd7c971f6aab7bd5f4f8d1e0788b90f34e772 | [
"Apache-2.0"
] | null | null | null | TBS/b2c/api/views.py | v1ct0r5u3n/TBS | 459cd7c971f6aab7bd5f4f8d1e0788b90f34e772 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import generics
from django.shortcuts import get_object_or_404
from b2c.models import Order,SalesShare,Refund
from .serializers import OrderSerializer
from .serializers import SalesShareSerializer
from .serializers import RefundSerializer
# Create your views here.
class OrderList(generics.ListCreateAPIView):
queryset = Order.objects.all()
serializer_class = OrderSerializer
class OrderDetail(generics.RetrieveDestroyAPIView):
queryset = Order.objects.all()
serializer_class = OrderSerializer
class SalesShareList(generics.ListCreateAPIView):
queryset = SalesShare.objects.all()
serializer_class = SalesShareSerializer
class SalesShareDetail(generics.RetrieveDestroyAPIView):
queryset = SalesShare.objects.all()
serializer_class = SalesShareSerializer
class RefundList(generics.ListCreateAPIView):
queryset = Refund.objects.all()
serializer_class = RefundSerializer
class RefundDetail(generics.RetrieveDestroyAPIView):
queryset = Refund.objects.all()
serializer_class = RefundSerializer
| 30.289474 | 56 | 0.841877 |
4a2729da3af5c46cbccd253c6101ed4bc60f0db1 | 132 | py | Python | publiquese/publiquese.py | abraji/publique_se | 851dbce302485db52867ed3b21349f5847b94aea | [
"MIT"
] | 5 | 2019-06-28T01:35:45.000Z | 2020-09-22T22:07:00.000Z | publiquese/publiquese.py | abraji/publique_se | 851dbce302485db52867ed3b21349f5847b94aea | [
"MIT"
] | null | null | null | publiquese/publiquese.py | abraji/publique_se | 851dbce302485db52867ed3b21349f5847b94aea | [
"MIT"
] | null | null | null | """Main module."""
from .globals import *
from .api.access import *
from .tse.access import *
from .etl.process_digesto import *
| 14.666667 | 34 | 0.704545 |
4a2729f6fb57765644bd61927c71fd1d6cbb9238 | 2,695 | py | Python | src/cliptools/modules/sanitize.py | bigbirdcode/cliptools | 992ddf2088462477992734af8eb00453bde3ce85 | [
"MIT"
] | null | null | null | src/cliptools/modules/sanitize.py | bigbirdcode/cliptools | 992ddf2088462477992734af8eb00453bde3ce85 | [
"MIT"
] | 6 | 2019-04-02T18:25:35.000Z | 2019-08-21T20:24:16.000Z | src/cliptools/modules/sanitize.py | bigbirdcode/cliptools | 992ddf2088462477992734af8eb00453bde3ce85 | [
"MIT"
] | null | null | null |
"""
Sample code for Chapter 4 - "Text and bytes"
From the book "Fluent Python" by Luciano Ramalho (O'Reilly, 2015)
http://shop.oreilly.com/product/0636920032519.do
Radical folding and text sanitizing.
Handling a string with `cp1252` symbols:
>>> order = '“Herr Voß: • ½ cup of Œtker™ caffè latte • bowl of açaí.”'
>>> shave_marks(order)
'“Herr Voß: • ½ cup of Œtker™ caffe latte • bowl of acai.”'
>>> shave_marks_latin(order)
'“Herr Voß: • ½ cup of Œtker™ caffe latte • bowl of acai.”'
>>> dewinize(order)
'"Herr Voß: - ½ cup of OEtker(TM) caffè latte - bowl of açaí."'
>>> asciize(order)
'"Herr Voss: - 1⁄2 cup of OEtker(TM) caffe latte - bowl of acai."'
Handling a string with Greek and Latin accented characters:
>>> greek = 'Ζέφυρος, Zéfiro'
>>> shave_marks(greek)
'Ζεφυρος, Zefiro'
>>> shave_marks_latin(greek)
'Ζέφυρος, Zefiro'
>>> dewinize(greek)
'Ζέφυρος, Zéfiro'
>>> asciize(greek)
'Ζέφυρος, Zefiro'
"""
# BEGIN SHAVE_MARKS
import unicodedata
import string
def shave_marks(txt):
"""Remove all diacritic marks"""
norm_txt = unicodedata.normalize('NFD', txt) # <1>
shaved = ''.join(c for c in norm_txt
if not unicodedata.combining(c)) # <2>
return unicodedata.normalize('NFC', shaved) # <3>
# END SHAVE_MARKS
# BEGIN SHAVE_MARKS_LATIN
def shave_marks_latin(txt):
"""Remove all diacritic marks from Latin base characters"""
norm_txt = unicodedata.normalize('NFD', txt) # <1>
latin_base = False
keepers = []
for c in norm_txt:
if unicodedata.combining(c) and latin_base: # <2>
continue # ignore diacritic on Latin base char
keepers.append(c) # <3>
# if it isn't combining char, it's a new base char
if not unicodedata.combining(c): # <4>
latin_base = c in string.ascii_letters
shaved = ''.join(keepers)
return unicodedata.normalize('NFC', shaved) # <5>
# END SHAVE_MARKS_LATIN
# BEGIN ASCIIZE
single_map = str.maketrans("""‚ƒ„†ˆ‹‘’“”•–—˜›""", # <1>
"""'f"*^<''""---~>""")
multi_map = str.maketrans({ # <2>
'€': '<euro>',
'…': '...',
'Œ': 'OE',
'™': '(TM)',
'œ': 'oe',
'‰': '<per mille>',
'‡': '**',
})
multi_map.update(single_map) # <3>
def dewinize(txt):
"""Replace Win1252 symbols with ASCII chars or sequences"""
return txt.translate(multi_map) # <4>
def asciize(txt):
no_marks = shave_marks_latin(dewinize(txt)) # <5>
no_marks = no_marks.replace('ß', 'ss') # <6>
return unicodedata.normalize('NFKC', no_marks) # <7>
# END ASCIIZE
| 28.670213 | 75 | 0.592579 |
4a272a2cc76acdc119799b2c4562785e7160a374 | 712 | py | Python | day05/demo01.py | CHExN/practice_python | 10460eebaabe52b64163604506eeaa84ebb4d229 | [
"MIT"
] | null | null | null | day05/demo01.py | CHExN/practice_python | 10460eebaabe52b64163604506eeaa84ebb4d229 | [
"MIT"
] | null | null | null | day05/demo01.py | CHExN/practice_python | 10460eebaabe52b64163604506eeaa84ebb4d229 | [
"MIT"
] | null | null | null | '''
寻找“水仙花数”。
'''
index = 100
while False:
a = int(index / 100)
b = int(index / 10 % 10)
c = int(index % 10)
# print(a, end='\t')
# print(b, end='\t')
# print(c)
if (a * a * a + b * b * b + c * c * c == index):
print(index)
index += 1
num = 100
while True:
s = str(num)
length = len(s)
count = length
num_sum = 0
while count:
# num_sum += ((num // (10**(count - 1))) % 10)**length
num_sum += int(s[count - 1])**length
count -= 1
else:
if num_sum == num:
print("%d is %d bit narcissistic_number" % (num, length))
# else:
# print("%d is not a narcissistic_number" % num)
num += 1
| 21.575758 | 69 | 0.466292 |
4a272a6d6a1492401c30db5d0dca9ace3643b65f | 3,093 | py | Python | pahelix/datasets/bace_dataset.py | agave233/PaddleHelix | e5578f72c2a203a27d9df7da111f1ced826c1429 | [
"Apache-2.0"
] | 454 | 2020-11-21T01:02:45.000Z | 2022-03-29T12:53:40.000Z | pahelix/datasets/bace_dataset.py | chupvl/PaddleHelix | 6e082f89b8090c3c360593d40a08bffc884165dd | [
"Apache-2.0"
] | 161 | 2020-12-12T06:35:54.000Z | 2022-03-27T11:31:13.000Z | pahelix/datasets/bace_dataset.py | chupvl/PaddleHelix | 6e082f89b8090c3c360593d40a08bffc884165dd | [
"Apache-2.0"
] | 108 | 2020-12-07T09:01:10.000Z | 2022-03-31T14:42:29.000Z | #!/usr/bin/python
#-*-coding:utf-8-*-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processing of the bace dataset.
It contains quantitative IC50 and qualitative (binary label) binding results for
a set of inhibitors of human beta-secretase 1 (BACE-1).
The data are experimental values collected from the scientific literature; the set
contains 152 compounds with their 2D structures and properties.
You can download the dataset from
http://moleculenet.ai/datasets-1 and load it into the pahelix reader creators.
"""
import os
from os.path import join, exists
import pandas as pd
import numpy as np
from pahelix.datasets.inmemory_dataset import InMemoryDataset
__all__ = ['get_default_bace_task_names', 'load_bace_dataset']
def get_default_bace_task_names():
"""Get that default bace task names."""
return ['Class']
def load_bace_dataset(data_path, task_names=None):
"""Load bace dataset ,process the classification labels and the input information.
Description:
The data file contains a csv table, in which columns below are used:
mol: The smile representation of the molecular structure;
pIC50: The negative log of the IC50 binding affinity;
class: The binary labels for inhibitor.
Args:
data_path(str): the path to the cached npz path.
task_names(list): a list of header names to specify the columns to fetch from
the csv file.
Returns:
an InMemoryDataset instance.
Example:
.. code-block:: python
dataset = load_bace_dataset('./bace')
print(len(dataset))
References:
[1]Subramanian, Govindan, et al. “Computational modeling of β-secretase 1 (BACE-1) inhibitors using ligand based approaches.” Journal of chemical information and modeling 56.10 (2016): 1936-1949.
"""
if task_names is None:
task_names = get_default_bace_task_names()
raw_path = join(data_path, 'raw')
csv_file = os.listdir(raw_path)[0]
input_df = pd.read_csv(join(raw_path, csv_file), sep=',')
smiles_list = input_df['mol']
labels = input_df[task_names]
# convert 0 to -1
labels = labels.replace(0, -1)
# there are no nans
data_list = []
for i in range(len(smiles_list)):
data = {}
data['smiles'] = smiles_list[i]
data['label'] = labels.values[i]
data_list.append(data)
dataset = InMemoryDataset(data_list)
return dataset
| 30.323529 | 199 | 0.687359 |
4a272b21cb8f31614a340425aefa4949d0aa3461 | 1,365 | py | Python | framework/OutStreams/__init__.py | milljm/raven | 5f29fe81b75e2ffbeb54a55aa63647e7b2f6457b | [
"Apache-2.0"
] | 2 | 2019-10-11T15:59:10.000Z | 2021-04-08T18:23:57.000Z | framework/OutStreams/__init__.py | milljm/raven | 5f29fe81b75e2ffbeb54a55aa63647e7b2f6457b | [
"Apache-2.0"
] | 1 | 2018-03-27T13:06:00.000Z | 2018-03-27T13:06:00.000Z | framework/OutStreams/__init__.py | milljm/raven | 5f29fe81b75e2ffbeb54a55aa63647e7b2f6457b | [
"Apache-2.0"
] | 1 | 2017-08-29T16:09:13.000Z | 2017-08-29T16:09:13.000Z | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The OutStreamManagers module includes the different type of ways to output
data available in RAVEN
Created on April 5, 2016
@author: maljdp
supercedes OutStreamManager.py from alfoa (11/14/2013)
"""
from __future__ import absolute_import
## These lines ensure that we do not have to do something like:
## 'from OutStreamManagers.OutStreamPlot import OutStreamPlot' outside
## of this submodule
from .OutStreamManager import OutStreamManager
from .OutStreamPlot import OutStreamPlot
from .OutStreamPrint import OutStreamPrint
from .Factory import knownTypes
from .Factory import returnInstance
from .Factory import returnClass
# We should not really need this as we do not use wildcard imports
__all__ = ['OutStreamManager','OutStreamPlot','OutStreamPrint']
| 35.921053 | 76 | 0.788278 |
4a272bd64bb05ef971f41aa0c6da8c61fc9d8c2c | 153 | py | Python | lib.py | nfoletia/git-wizard-code | ca3157d665b1ce83fad3546bae88fd676a937017 | [
"MIT"
] | null | null | null | lib.py | nfoletia/git-wizard-code | ca3157d665b1ce83fad3546bae88fd676a937017 | [
"MIT"
] | 1 | 2020-02-22T19:11:55.000Z | 2020-02-22T19:11:55.000Z | lib.py | nfoletia/git-wizard-code | ca3157d665b1ce83fad3546bae88fd676a937017 | [
"MIT"
] | 14 | 2020-02-03T23:19:47.000Z | 2020-02-22T18:27:30.000Z | #! /usr/bin/env python3
'''A library of functions for our cool app'''
def add(a, b):
return a + b
def add1(a):
pass
def sub1(a):
pass
| 9.5625 | 45 | 0.575163 |
4a272cc63c7869ada0d1d35f43af8f9809196776 | 2,610 | py | Python | examples/basic_cog.py | realstealthninja/Stealthy-Wavelink | b51f6f7aab5e79694897b193b57de8f9cdb14852 | [
"MIT"
] | null | null | null | examples/basic_cog.py | realstealthninja/Stealthy-Wavelink | b51f6f7aab5e79694897b193b57de8f9cdb14852 | [
"MIT"
] | null | null | null | examples/basic_cog.py | realstealthninja/Stealthy-Wavelink | b51f6f7aab5e79694897b193b57de8f9cdb14852 | [
"MIT"
] | null | null | null | """MIT License
Copyright (c) 2019-2021 PythonistaGuild
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from disnake.ext import commands
import wavelink
class Bot(commands.Bot):
def __init__(self):
super().__init__(command_prefix='>?')
async def on_ready(self):
print('Bot is ready!')
class Music(commands.Cog):
"""Music cog to hold Wavelink related commands and listeners."""
def __init__(self, bot: commands.Bot):
self.bot = bot
bot.loop.create_task(self.connect_nodes())
async def connect_nodes(self):
"""Connect to our Lavalink nodes."""
await self.bot.wait_until_ready()
await wavelink.NodePool.create_node(bot=self.bot,
host='localhost',
port=2333,
password='youshallnotpass')
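        # (Assumes a Lavalink server is already running locally; the host, port
        # and password above must match that server's application.yml settings.)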
@commands.Cog.listener()
async def on_wavelink_node_ready(self, node: wavelink.Node):
"""Event fired when a node has finished connecting."""
print(f'Node: <{node.identifier}> is ready!')
@commands.command()
async def play(self, ctx: commands.Context, *, search: wavelink.YouTubeTrack):
"""Play a song with the given search query.
If not connected, connect to our voice channel.
"""
if not ctx.voice_client:
vc: wavelink.Player = await ctx.author.voice.channel.connect(cls=wavelink.Player)
else:
vc: wavelink.Player = ctx.voice_client
await vc.play(search)
bot = Bot()
bot.add_cog(Music(bot))
bot.run('YOUR_BOT_TOKEN')
| 34.342105 | 93 | 0.682759 |
4a272cf02555d9493642df6f925c1fdd26b2986b | 1,348 | py | Python | awesome-webapp/www/models.py | ThansksJava/learnPython | 64c8df012bf91582ee10459610b2157f535f78ad | [
"Apache-2.0"
] | null | null | null | awesome-webapp/www/models.py | ThansksJava/learnPython | 64c8df012bf91582ee10459610b2157f535f78ad | [
"Apache-2.0"
] | null | null | null | awesome-webapp/www/models.py | ThansksJava/learnPython | 64c8df012bf91582ee10459610b2157f535f78ad | [
"Apache-2.0"
] | null | null | null | import time, uuid
from .orm import Model, StringField, BooleanField, FloatField, TextField
def next_id():
return '%015d%s000' % (int(time.time() * 1000), uuid.uuid4().hex)
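# next_id() yields a fixed-width 50-character key: 15 zero-padded digits of the
# current epoch time in milliseconds, 32 hex characters from uuid4() and a
# trailing '000' -- which is why the id columns below are declared varchar(50).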
class User(Model):
__table__ = 'users'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
email = StringField(ddl='varchar(50)')
passwd = StringField(ddl='varchar(50)')
admin = BooleanField()
name = StringField(ddl='varchar(50)')
image = StringField(ddl='varchar(500)')
created_at = FloatField(default=time.time)
class Blog(Model):
__table__ = 'blogs'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
user_id = StringField(ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_image = StringField(ddl='varchar(500)')
name = StringField(ddl='varchar(50)')
summary = StringField(ddl='varchar(200)')
content = TextField()
created_at = FloatField(default=time.time)
class Comment(Model):
__table__ = 'comments'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
blog_id = StringField(ddl='varchar(50)')
user_id = StringField(ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_image = StringField(ddl='varchar(500)')
content = TextField()
created_at = FloatField(default=time.time) | 32.878049 | 74 | 0.68546 |
4a272d6d28f8851f45a900b804a489625d40aaf0 | 2,372 | py | Python | Android/NDK/android-ndk-r20b-win/build/extract_platform.py | X018/CCTOOL | 989af4d7edab82bf540400eb72eca4e7447d722c | [
"MIT"
] | null | null | null | Android/NDK/android-ndk-r20b-win/build/extract_platform.py | X018/CCTOOL | 989af4d7edab82bf540400eb72eca4e7447d722c | [
"MIT"
] | null | null | null | Android/NDK/android-ndk-r20b-win/build/extract_platform.py | X018/CCTOOL | 989af4d7edab82bf540400eb72eca4e7447d722c | [
"MIT"
] | null | null | null | #
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Extracts the platform version from the project.properties file."""
from __future__ import print_function
import argparse
import os.path
import re
def parse_args():
"""Parse and return command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'properties_file', metavar='PROPERTIES_FILE', type=os.path.abspath,
help='Path to the project.properties file.')
return parser.parse_args()
def get_platform(properties_file):
"""Finds and returns the platform version in the properties file.
Returns:
String form of the platform version if found, else "unknown".
"""
android_regex = re.compile(r'(android-\w+)')
vendor_regex = re.compile(r':(\d+)\s*$')
for line in properties_file:
match = android_regex.search(line)
if match is not None:
return match.group(1)
match = vendor_regex.search(line)
if match is not None:
return 'android-{}'.format(match.group(1))
return 'unknown'
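# A rough illustration of the two accepted forms (example data, not from the
# NDK sources):
#
#     import io
#     get_platform(io.StringIO("target=android-21\n"))       # -> "android-21"
#     get_platform(io.StringIO("target=vendor:addon:19\n"))  # -> "android-19"
#     get_platform(io.StringIO("# no target line\n"))        # -> "unknown"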
def main():
args = parse_args()
# Following the comment in the old awk script, we're trying to match:
#
# target=android-<api>
# target=<vendor>:<name>:<api>
#
# There unfortunately aren't any examples of what the vendor target
# specification actually looks like or where it might be used, so we'll
# just have to mirror the simplistic match that was in the awk script.
#
# android- may be followed by either the numeric API level or the named
# platform. Note that while we can parse any name, ndk-build only support a
# small handful.
with open(args.properties_file) as properties_file:
print(get_platform(properties_file))
if __name__ == '__main__':
main()
| 32.054054 | 79 | 0.693086 |
4a272e3f60726922330f9a69f1c8c048cf929d72 | 3,021 | py | Python | src/oscar/apps/order/admin.py | nibinjose/django-oscar | efd86d048da8696ebfe9ab440c9c9a9f7c128ee9 | [
"BSD-3-Clause"
] | null | null | null | src/oscar/apps/order/admin.py | nibinjose/django-oscar | efd86d048da8696ebfe9ab440c9c9a9f7c128ee9 | [
"BSD-3-Clause"
] | null | null | null | src/oscar/apps/order/admin.py | nibinjose/django-oscar | efd86d048da8696ebfe9ab440c9c9a9f7c128ee9 | [
"BSD-3-Clause"
] | 2 | 2022-02-23T11:28:36.000Z | 2022-03-07T10:32:18.000Z | from django.contrib import admin
from oscar.core.loading import get_model
Order = get_model('order', 'Order')
OrderNote = get_model('order', 'OrderNote')
OrderStatusChange = get_model('order', 'OrderStatusChange')
CommunicationEvent = get_model('order', 'CommunicationEvent')
BillingAddress = get_model('order', 'BillingAddress')
ShippingAddress = get_model('order', 'ShippingAddress')
Line = get_model('order', 'Line')
LinePrice = get_model('order', 'LinePrice')
ShippingEvent = get_model('order', 'ShippingEvent')
ShippingEventType = get_model('order', 'ShippingEventType')
PaymentEvent = get_model('order', 'PaymentEvent')
PaymentEventType = get_model('order', 'PaymentEventType')
PaymentEventQuantity = get_model('order', 'PaymentEventQuantity')
LineAttribute = get_model('order', 'LineAttribute')
OrderDiscount = get_model('order', 'OrderDiscount')
Surcharge = get_model('order', 'Surcharge')
class LineInline(admin.TabularInline):
model = Line
extra = 0
class OrderAdmin(admin.ModelAdmin):
raw_id_fields = ['user', 'billing_address', 'shipping_address', ]
list_display = ('number', 'total_incl_tax', 'user',
'billing_address', 'date_placed')
readonly_fields = ('number', 'total_incl_tax', 'total_excl_tax',
'shipping_incl_tax', 'shipping_excl_tax')
inlines = [LineInline]
class LineAdmin(admin.ModelAdmin):
list_display = ('order', 'product', 'stockrecord', 'quantity')
class LinePriceAdmin(admin.ModelAdmin):
list_display = ('order', 'line', 'price_incl_tax', 'quantity')
class ShippingEventTypeAdmin(admin.ModelAdmin):
list_display = ('name', )
class PaymentEventQuantityInline(admin.TabularInline):
model = PaymentEventQuantity
extra = 0
class PaymentEventAdmin(admin.ModelAdmin):
list_display = ('order', 'event_type', 'amount', 'num_affected_lines',
'date_created')
inlines = [PaymentEventQuantityInline]
class PaymentEventTypeAdmin(admin.ModelAdmin):
pass
class OrderDiscountAdmin(admin.ModelAdmin):
readonly_fields = ('order', 'category', 'offer_id', 'offer_name',
'voucher_id', 'voucher_code', 'amount')
list_display = ('order', 'category', 'offer', 'voucher',
'voucher_code', 'amount')
class SurchargeAdmin(admin.ModelAdmin):
raw_id_fields = ("order",)
admin.site.register(Order, OrderAdmin)
admin.site.register(OrderNote)
admin.site.register(OrderStatusChange)
admin.site.register(ShippingAddress)
admin.site.register(Line, LineAdmin)
admin.site.register(LinePrice, LinePriceAdmin)
admin.site.register(ShippingEvent)
admin.site.register(ShippingEventType, ShippingEventTypeAdmin)
admin.site.register(PaymentEvent, PaymentEventAdmin)
admin.site.register(PaymentEventType, PaymentEventTypeAdmin)
admin.site.register(LineAttribute)
admin.site.register(OrderDiscount, OrderDiscountAdmin)
admin.site.register(CommunicationEvent)
admin.site.register(BillingAddress)
admin.site.register(Surcharge, SurchargeAdmin)
| 33.566667 | 74 | 0.738497 |
4a272e5de73ec81398429ee013408a2be775cb2a | 1,379 | py | Python | var/spack/repos/builtin/packages/r-makecdfenv/package.py | varioustoxins/spack | cab0e4cb240f34891a6d753f3393e512f9a99e9a | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/r-makecdfenv/package.py | varioustoxins/spack | cab0e4cb240f34891a6d753f3393e512f9a99e9a | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6 | 2022-01-08T08:41:11.000Z | 2022-03-14T19:28:07.000Z | var/spack/repos/builtin/packages/r-makecdfenv/package.py | foeroyingur/spack | 5300cbbb2e569190015c72d0970d25425ea38647 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMakecdfenv(RPackage):
"""CDF Environment Maker
This package has two functions. One reads a Affymetrix chip description
file (CDF) and creates a hash table environment containing the
location/probe set membership mapping. The other creates a package that
automatically loads that environment."""
homepage = "https://bioconductor.org/packages/makecdfenv"
git = "https://git.bioconductor.org/packages/makecdfenv.git"
version('1.66.0', commit='02aa975d543089f5495cb3b4e8edbcf0ff05148a')
version('1.60.0', commit='900ece3ecd7a0ade9f8a0374e5a03def4e079cb3')
version('1.58.0', commit='6f513e39c4920a6da10d22718fc3bf278fe5ffe2')
version('1.56.0', commit='f6b48e9a9f18598653d05bc0bdffeae7fefbb327')
version('1.54.0', commit='3ff646ddc4b028e46b1e091ff9c2d17ce77cec26')
version('1.52.0', commit='b88a3e93e3b7feeeca69eda7c1fc5a0826c81120')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-affyio', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-affy', type=('build', 'run'))
depends_on('r-zlibbioc', type=('build', 'run'))
| 43.09375 | 78 | 0.720812 |
4a272e618c68b989c45d22721c45f06ae93d41e3 | 1,144 | py | Python | figure/grad/figure2.py | HanSeokhyeon/Speech_recogniton_for_English_and_Korean | e0eaf1da1e1ac15f34402fea8cb330d008140d61 | [
"MIT"
] | null | null | null | figure/grad/figure2.py | HanSeokhyeon/Speech_recogniton_for_English_and_Korean | e0eaf1da1e1ac15f34402fea8cb330d008140d61 | [
"MIT"
] | null | null | null | figure/grad/figure2.py | HanSeokhyeon/Speech_recogniton_for_English_and_Korean | e0eaf1da1e1ac15f34402fea8cb330d008140d61 | [
"MIT"
] | null | null | null | """
Figure 2. Frequency responses of the 40-band mel filterbank and the 32-band gammatone filterbank used in the proposed method
"""
import numpy as np
import matplotlib.pyplot as plt
import librosa
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] = 20
fig = plt.figure(figsize=(10, 10))
plt.subplot(2, 1, 1)
mel_filter_freq = librosa.filters.mel(sr=16000, n_fft=400, n_mels=32).T
plt.plot(mel_filter_freq, color='gray')
plt.title("32-band Mel filterbank")
plt.xlim(0, 201)
plt.xticks([0, 25, 50, 75, 100, 125, 150, 175, 200], [0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000])
plt.xlabel("Frequency [Hz]")
plt.ylim(0)
plt.yticks([])
plt.ylabel("Gain")
plt.subplot(2, 1, 2)
gammatone_filter = np.fromfile("../fft.raw", dtype=np.float64)
gammatone_filter = gammatone_filter.reshape(32, 2048)
gammatone_filter_freq = np.abs(np.fft.fft(gammatone_filter, axis=1))[:, :1024].T  # keep the positive-frequency half (0-8 kHz)
plt.plot(gammatone_filter_freq, color='gray')
plt.title("32-band gammatone filterbank")
plt.xlim(0, 1024)
plt.xticks([0, 128, 256, 384, 512, 640, 768, 896, 1024], [0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000])
plt.xlabel("Frequency [Hz]")
plt.ylim(0)
plt.yticks([])
plt.ylabel("Gain")
plt.show()
| 24.869565 | 109 | 0.697552 |
4a272f9fb37bc96dd9f4b0e81e5f23757bc24673 | 1,300 | py | Python | samples/azure-quantum/parallel-qrng/parallel_qrng.py | buseorak/Quantum | 8cf50d30cfd4932f79d791245305b5c47ac609a1 | [
"MIT"
] | 1 | 2021-02-17T23:44:18.000Z | 2021-02-17T23:44:18.000Z | samples/azure-quantum/parallel-qrng/parallel_qrng.py | seanwallawalla-forks/Quantum | 24b7497b012e4ac4d533ec30df1794621b753dfa | [
"MIT"
] | 2 | 2022-03-30T15:29:15.000Z | 2022-03-30T15:29:18.000Z | samples/azure-quantum/parallel-qrng/parallel_qrng.py | seanwallawalla-forks/Quantum | 24b7497b012e4ac4d533ec30df1794621b753dfa | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import sys
import qsharp
import qsharp.azure
# Print a list of Q# operations that are available to simulate or execute
# from Python.
print(qsharp.get_available_operations())
from Microsoft.Quantum.Samples import SampleRandomNumber
if __name__ == "__main__":
# Simulate the operation locally.
result = SampleRandomNumber.simulate(nQubits=3)
print(f'Local simulation result: {result}')
# Submit the operation to an Azure Quantum workspace.
if len(sys.argv) < 2:
print(
"Please provide the resource ID for your Azure Quantum workspace as a command-line argument.\n" +
"E.g.: python parallel_qrng.py /subscriptions/subscription-id/Microsoft.Quantum/Workspaces/your-workspace-name\n" +
"You can copy and paste this resource ID from the Quantum Workspace page in the Azure Portal."
)
else:
resource_id = sys.argv[1]
qsharp.azure.connect(resourceId=resource_id)
qsharp.azure.target("ionq.simulator" if len(sys.argv) < 3 else sys.argv[2])
result = qsharp.azure.execute(SampleRandomNumber, nQubits=3, shots=1000, jobName="Generate 3-bit random number")
print(f'Azure Quantum service execution result: {result}')
| 38.235294 | 127 | 0.712308 |
4a272fb5542c803a3dd8456da469fbf3b989cc04 | 850 | py | Python | accounts/urls.py | amyxzhang/pano-server | b3949af178a41b6327b5a74dae96fc466b12f91a | [
"MIT"
] | 22 | 2015-03-10T12:53:36.000Z | 2022-03-01T15:15:39.000Z | accounts/urls.py | amyxzhang/pano-server | b3949af178a41b6327b5a74dae96fc466b12f91a | [
"MIT"
] | 135 | 2015-01-13T03:36:43.000Z | 2018-10-06T20:09:45.000Z | accounts/urls.py | haystack/eyebrowse-server | 582570a9af031efba77f4c50286fdc24dcec1e55 | [
"MIT"
] | 11 | 2015-03-12T21:07:32.000Z | 2018-07-26T16:58:53.000Z | from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls import url
urlpatterns = patterns('accounts.views',
url(r'', include('registration.backends.default.urls')),
url(r'^profile/sharelist$', 'whitelist'),
url(r'^profile/account$', 'account'),
url(r'^profile/mutelist', 'mutelist'),
url(r'^profile/edit_tags', 'edit_tags'),
url(r'^profile/sync_twitter', 'sync_twitter'),
url(r'^profile/sync_delicious', 'sync_delicious'),
url(r'^profile/edit$', 'account'), # old extensions
url(r'^profile/connections$', 'connections'),
url(r'^connect$', 'connect')
)
| 44.736842 | 79 | 0.510588 |
4a27300b38dcb17854008bc79746841debcfe40a | 2,624 | py | Python | ytmusicapi/parsers/library.py | neuenmuller/ytmusicapi | 7c18d8df4be21989ee95a17005dcfb728a067bb2 | [
"MIT"
] | null | null | null | ytmusicapi/parsers/library.py | neuenmuller/ytmusicapi | 7c18d8df4be21989ee95a17005dcfb728a067bb2 | [
"MIT"
] | null | null | null | ytmusicapi/parsers/library.py | neuenmuller/ytmusicapi | 7c18d8df4be21989ee95a17005dcfb728a067bb2 | [
"MIT"
] | null | null | null | from .utils import *
def parse_artists(results, uploaded=False):
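    """Parse musicResponsiveListItemRenderer items into artist dicts with
    browseId, artist name, song count (for uploads) or subscriber count,
    and thumbnails."""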
artists = []
for result in results:
data = result['musicResponsiveListItemRenderer']
artist = {}
artist['browseId'] = nav(data, NAVIGATION_BROWSE_ID)
artist['artist'] = get_item_text(data, 0)
if uploaded:
artist['songs'] = get_item_text(data, 1).split(' ')[0]
else:
subtitle = get_item_text(data, 1)
if subtitle:
artist['subscribers'] = subtitle.split(' ')[0]
artist['thumbnails'] = nav(data, THUMBNAILS)
artists.append(artist)
return artists
def parse_albums(results, upload=True):
albums = []
for result in results:
data = result['musicTwoRowItemRenderer']
album = {}
album['browseId'] = nav(data, TITLE + NAVIGATION_BROWSE_ID)
album['title'] = nav(data, TITLE_TEXT)
album['type'] = nav(data, SUBTITLE)
album['thumbnails'] = nav(data, THUMBNAIL_RENDERER)
album['artists'] = []
run_count = len(data['subtitle']['runs'])
has_artists = False
if upload:
if run_count == 3:
if nav(data, SUBTITLE2).isdigit():
album['year'] = nav(data, SUBTITLE2)
else:
has_artists = True
elif run_count > 3:
album['year'] = nav(data, SUBTITLE3)
has_artists = True
if has_artists:
subtitle = data['subtitle']['runs'][2]
album['artists'].append({
'name': subtitle['text'],
'id': nav(subtitle, NAVIGATION_BROWSE_ID)
})
else:
album['artists'] = nav(data, SUBTITLE)
album['year'] = nav(data, SUBTITLE2)
album['trackCount'] = nav(data, SUBTITLE3).split(' ')[0]
albums.append(album)
return albums
def parse_library_artists(response, request_func, limit):
results = find_object_by_key(nav(response, SINGLE_COLUMN_TAB + SECTION_LIST),
'itemSectionRenderer')
results = nav(results, ITEM_SECTION)
if 'musicShelfRenderer' not in results:
return []
results = results['musicShelfRenderer']
artists = parse_artists(results['contents'])
if 'continuations' in results:
parse_func = lambda contents: parse_artists(contents)
artists.extend(
get_continuations(results, 'musicShelfContinuation', limit - len(artists),
request_func, parse_func))
return artists
| 33.641026 | 86 | 0.5625 |
4a273096d688a07370d4d5381b53aa107c65a48b | 4,772 | py | Python | PyScripts(elseIsTkinter)/hangman.py | Dario213/My-Python-Scripts | dee96e84e8a892e7a72f96c47a1f161e068572cb | [
"Apache-2.0"
] | null | null | null | PyScripts(elseIsTkinter)/hangman.py | Dario213/My-Python-Scripts | dee96e84e8a892e7a72f96c47a1f161e068572cb | [
"Apache-2.0"
] | null | null | null | PyScripts(elseIsTkinter)/hangman.py | Dario213/My-Python-Scripts | dee96e84e8a892e7a72f96c47a1f161e068572cb | [
"Apache-2.0"
] | null | null | null | from random import randint
from time import sleep
def turn(player_lives: int, player_name: str, sentence: str, sentence_out: str) -> list: # [state_of_choice, reformed_sentence, lives_to_remove]
found = False
letters = "qwertzuiopasdfghjklyxcvbnm"
counter = 0
if player_lives > 0:
letter = input(f"{player_name}, please enter a letter: ")
if len(letter) == 1 and letter in letters:
for i in range(len(sentence)):
if sentence.lower()[i] == letter:
found = True
sentence_out = sentence_out[:i] + letter + sentence_out[i+1:]
counter += 1
elif len(letter) > 1:
if letter.lower() == sentence.lower():
return ["correct_sentence", sentence_out, 0]
else:
return ["incorrect", sentence_out, 1]
else:
return ["incorrect", sentence_out, 1]
if found:
return ["correct", sentence_out, counter] # return[2] -> Num of letter appearences
else:
return ["incorrect", sentence_out, 1]
def count(sentence: str) -> int:
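    """Return the number of hidden letters ('*') remaining in the masked sentence."""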
counter = 0
for i in sentence:
if i == '*':
counter += 1
return counter
def game(player_1: str, player_2: str) -> None:
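    """Alternate turns between the two players on randomly chosen sentences until the sentence pool is exhausted, then announce the winner."""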
pl_1_lives = 3
pl_2_lives = 3
sentences = [
"Politicians in croatia are dumb",
"I can not believe you are acctually playing this",
"Bruh just do something useful",
"Assume penguin is a cylindrical object",
"Chess is a great game that improves your brain functions",
"People are living in a simulation",
"People are acctually very similar to artificial intelligence",
"Writing this without API makes me wanna die",
"Can not wait to find help with creating sentences"
    ] # TODO -> Try taking data from some API instead of making your own sentences
print (f"Hello, today {player_1} will be playing hangman against {player_2}.\n")
sleep(5)
print(f"1. Every single one of you will have a choice to pick a letter until you find the sentence .\n")
sleep(5)
print(f"2. Every correct letter gives you letter appearence points and every correct sentence gives you\n that points that are left in a sentence(exact number of letters left)\n")
sleep(7)
print("If you enter more than one letter it is considered as an sentence input (until I upgrade code)")
print("\nSTARTING NOW, GLHF!\n")
sleep(5)
letters = "qwertzuiopasdfghjklyxcvbnm"
player_1_points = 0
player_2_points = 0
while len(sentences) > 0:
sentence = sentences[randint(0, len(sentences) - 1)]
sentence_out = ""
for i in sentence.lower():
if i in letters:
sentence_out += "*"
else:
sentence_out += " "
while "*" in sentence_out:
pl_1_lives_curr = 1
pl_2_lives_curr = 1
print("\nSentence: " + sentence_out)
results1 = turn(pl_1_lives_curr, player_1, sentence, sentence_out)
if results1[0] == "correct_sentence":
print(f"\nGreat job! That is correct sentence!\n {str(count(results1[1]))} points for {player_1}!")
player_1_points += count(results1[1])
sentences.remove(sentence)
break
elif results1[0] == "correct":
print(f"\nGreat job! That is correct letter!\nOne point for {player_1}!")
player_1_points += results1[2]
sentence_out = results1[1]
else:
print(f"\nINCORRECT!\nOne point taken from {player_1}!")
player_1_points -= 1
print("\nSentence: " + sentence_out)
results2 = turn(pl_2_lives_curr, player_2, sentence, sentence_out)
if results2[0] == "correct_sentence":
print(f"\nGreat job! That is correct sentence!\n {str(count(results2[1]))} points for {player_2}!")
player_2_points += count(results2[1])
sentences.remove(sentence)
break
elif results2[0] == "correct":
print(f"\nGreat job! That is correct letter!\nOne point for {player_2}!")
player_2_points += results2[2]
sentence_out = results2[1]
else:
print(f"\nINCORRECT!\nOne point taken from {player_2}!")
player_2_points -= 1
print(f"\nAND FOR FINAL CHECKING OF POINTS WINNER IS ...\n")
sleep(9)
if player_1_points == player_2_points:
print(f"NOBODY, WOW, THAT IS SO RARE!!!\nBOTH OF YOU HAD WON {player_1_points}.\n")
sleep(2)
elif player_1_points > player_2_points:
print(f"{player_1} with {player_1_points}!!!\n {player_2} had {player_2_points}.\n")
sleep(2)
else :
print(f"{player_2} with {player_2_points}!!!\n {player_1} had {player_1_points}.\n")
sleep(2)
def main():
player_1 = input("Enter name of player 1: ")
print()
if len(player_1) == 0:
print("I'll call you than Mr_X")
player_1 = "Mr_X"
player_2 = input("Enter name of player 2: ")
print()
if len(player_2) == 0:
print("I'll call you than Mr_Y")
player_2 = "Mr_Y"
game(player_1, player_2)
if __name__ == '__main__':
main()
| 30.987013 | 181 | 0.665968 |
4a2730d3061f280ff0283e41be616f88b2de9809 | 8,372 | py | Python | openstack_dashboard/dashboards/project/routers/ports/forms.py | NunoEdgarGFlowHub/horizon | 73a0bbd43ea78ac5337f7d00977ec5f32452067e | [
"Apache-2.0"
] | 1 | 2018-04-17T02:32:05.000Z | 2018-04-17T02:32:05.000Z | openstack_dashboard/dashboards/project/routers/ports/forms.py | NunoEdgarGFlowHub/horizon | 73a0bbd43ea78ac5337f7d00977ec5f32452067e | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/project/routers/ports/forms.py | NunoEdgarGFlowHub/horizon | 73a0bbd43ea78ac5337f7d00977ec5f32452067e | [
"Apache-2.0"
] | null | null | null | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class AddInterface(forms.SelfHandlingForm):
subnet_id = forms.ThemableChoiceField(label=_("Subnet"))
ip_address = forms.IPField(
label=_("IP Address (optional)"), required=False, initial="",
help_text=_("Specify an IP address for the interface "
"created (e.g. 192.168.0.254)."),
version=forms.IPv4 | forms.IPv6, mask=False)
failure_url = 'horizon:project:routers:detail'
def __init__(self, request, *args, **kwargs):
super(AddInterface, self).__init__(request, *args, **kwargs)
c = self.populate_subnet_id_choices(request)
self.fields['subnet_id'].choices = c
def populate_subnet_id_choices(self, request):
tenant_id = self.request.user.tenant_id
networks = []
router_subnet_ids = []
router_id = self.initial['router_id']
try:
networks = api.neutron.network_list_for_tenant(request, tenant_id)
if router_id:
ports = api.neutron.port_list(request, device_id=router_id)
router_subnet_ids = [fixed_ip["subnet_id"] for port in ports
for fixed_ip in port.fixed_ips]
except Exception as e:
LOG.info('Failed to get network list: %s', e)
msg = _('Failed to get network list: %s') % e
messages.error(request, msg)
if router_id:
redirect = reverse(self.failure_url, args=[router_id])
else:
redirect = reverse('horizon:project:routers:index')
exceptions.handle(request, msg, redirect=redirect)
return
choices = []
for n in networks:
net_name = n.name + ': ' if n.name else ''
choices += [(subnet.id,
'%s%s (%s)' % (net_name, subnet.cidr,
subnet.name or subnet.id))
for subnet in n['subnets']
if subnet.id not in router_subnet_ids
and subnet.gateway_ip]
if choices:
choices.insert(0, ("", _("Select Subnet")))
else:
choices.insert(0, ("", _("No subnets available")))
return choices
def handle(self, request, data):
if data['ip_address']:
port = self._add_interface_by_port(request, data)
else:
port = self._add_interface_by_subnet(request, data)
msg = _('Interface added')
if port:
msg += ' ' + port.fixed_ips[0]['ip_address']
messages.success(request, msg)
return True
def _add_interface_by_subnet(self, request, data):
router_id = self.initial['router_id']
try:
router_inf = api.neutron.router_add_interface(
request, router_id, subnet_id=data['subnet_id'])
except Exception as e:
self._handle_error(request, router_id, e)
try:
port = api.neutron.port_get(request, router_inf['port_id'])
except Exception:
# Ignore an error when port_get() since it is just
# to get an IP address for the interface.
port = None
return port
def _add_interface_by_port(self, request, data):
router_id = self.initial['router_id']
subnet_id = data['subnet_id']
try:
subnet = api.neutron.subnet_get(request, subnet_id)
except Exception:
msg = _('Unable to get subnet "%s"') % subnet_id
self._handle_error(request, router_id, msg)
try:
ip_address = data['ip_address']
body = {'network_id': subnet.network_id,
'fixed_ips': [{'subnet_id': subnet.id,
'ip_address': ip_address}]}
port = api.neutron.port_create(request, **body)
except Exception as e:
self._handle_error(request, router_id, e)
try:
api.neutron.router_add_interface(request, router_id,
port_id=port.id)
except Exception as e:
self._delete_port(request, port)
self._handle_error(request, router_id, e)
return port
def _handle_error(self, request, router_id, reason):
LOG.info('Failed to add_interface: %s', reason)
msg = _('Failed to add interface: %s') % reason
redirect = reverse(self.failure_url, args=[router_id])
exceptions.handle(request, msg, redirect=redirect)
def _delete_port(self, request, port):
try:
api.neutron.port_delete(request, port.id)
except Exception as e:
LOG.info('Failed to delete port %(id)s: %(exc)s',
{'id': port.id, 'exc': e})
msg = _('Failed to delete port %s') % port.id
exceptions.handle(request, msg)
class SetGatewayForm(forms.SelfHandlingForm):
network_id = forms.ThemableChoiceField(label=_("External Network"))
enable_snat = forms.BooleanField(label=_("Enable SNAT"),
initial=True,
required=False)
failure_url = 'horizon:project:routers:index'
def __init__(self, request, *args, **kwargs):
super(SetGatewayForm, self).__init__(request, *args, **kwargs)
networks = self.populate_network_id_choices(request)
self.fields['network_id'].choices = networks
self.ext_gw_mode = api.neutron.is_extension_supported(
self.request, 'ext-gw-mode')
self.enable_snat_allowed = api.neutron.get_feature_permission(
self.request,
"ext-gw-mode",
"update_router_enable_snat")
if not self.ext_gw_mode or not self.enable_snat_allowed:
del self.fields['enable_snat']
def populate_network_id_choices(self, request):
search_opts = {'router:external': True}
try:
networks = api.neutron.network_list(request, **search_opts)
except Exception as e:
LOG.info('Failed to get network list: %s', e)
msg = _('Failed to get network list: %s') % e
messages.error(request, msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
return
choices = [(network.id, network.name or network.id)
for network in networks]
if choices:
choices.insert(0, ("", _("Select network")))
else:
choices.insert(0, ("", _("No networks available")))
return choices
def handle(self, request, data):
try:
enable_snat = None
if 'enable_snat' in data:
enable_snat = data['enable_snat']
api.neutron.router_add_gateway(request,
self.initial['router_id'],
data['network_id'],
enable_snat)
msg = _('Gateway interface is added')
messages.success(request, msg)
return True
except Exception as e:
LOG.info('Failed to set gateway to router %(id)s: %(exc)s',
{'id': self.initial['router_id'], 'exc': e})
msg = _('Failed to set gateway: %s') % e
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
| 41.241379 | 78 | 0.582656 |
4a273176e3f257c52533f7713b8070164e502f3e | 2,972 | py | Python | tests/unit/comments/test_comment_signing.py | snapperVibes/lbry-sdk | 77a51d1ad43404e5dc52af715a7bebfaeb3fee16 | [
"MIT"
] | 2 | 2021-12-24T18:29:49.000Z | 2021-12-26T02:04:57.000Z | tests/unit/comments/test_comment_signing.py | snapperVibes/lbry-sdk | 77a51d1ad43404e5dc52af715a7bebfaeb3fee16 | [
"MIT"
] | null | null | null | tests/unit/comments/test_comment_signing.py | snapperVibes/lbry-sdk | 77a51d1ad43404e5dc52af715a7bebfaeb3fee16 | [
"MIT"
] | null | null | null | from lbry.testcase import AsyncioTestCase
import hashlib
from lbry.extras.daemon.comment_client import sign_comment
from lbry.extras.daemon.comment_client import is_comment_signed_by_channel
from tests.unit.wallet.test_schema_signing import get_stream, get_channel
class TestSigningComments(AsyncioTestCase):
@staticmethod
def create_claim_comment_body(comment, claim, channel):
return {
'claim_id': claim.claim_id,
'channel_name': channel.claim_name,
'channel_id': channel.claim_id,
'comment': comment,
'comment_id': hashlib.sha256(comment.encode()).hexdigest()
}
async def test01_successful_create_sign_and_validate_comment(self):
channel = await get_channel('@BusterBluth')
stream = get_stream('pop secret')
comment = self.create_claim_comment_body('Cool stream', stream, channel)
sign_comment(comment, channel)
self.assertTrue(is_comment_signed_by_channel(comment, channel))
async def test02_fail_to_validate_spoofed_channel(self):
pdiddy = await get_channel('@PDitty')
channel2 = await get_channel('@TomHaverford')
stream = get_stream()
comment = self.create_claim_comment_body('Woahh This is Sick!! Shout out 2 my boy Tommy H', stream, pdiddy)
sign_comment(comment, channel2)
self.assertFalse(is_comment_signed_by_channel(comment, pdiddy))
async def test03_successful_sign_abandon_comment(self):
rswanson = await get_channel('@RonSwanson')
dsilver = get_stream('Welcome to the Pawnee, and give a big round for Ron Swanson, AKA Duke Silver')
comment_body = self.create_claim_comment_body('COMPUTER, DELETE ALL VIDEOS OF RON.', dsilver, rswanson)
sign_comment(comment_body, rswanson, sign_comment_id=True)
self.assertTrue(is_comment_signed_by_channel(comment_body, rswanson, sign_comment_id=True))
async def test04_invalid_signature(self):
rswanson = await get_channel('@RonSwanson')
jeanralphio = await get_channel('@JeanRalphio')
chair = get_stream('This is a nice chair. I made it with Mahogany wood and this electric saw')
chair_comment = self.create_claim_comment_body(
'Hah. You use an electric saw? Us swansons have been making chairs with handsaws just three after birth.',
chair,
rswanson
)
sign_comment(chair_comment, rswanson)
self.assertTrue(is_comment_signed_by_channel(chair_comment, rswanson))
self.assertFalse(is_comment_signed_by_channel(chair_comment, jeanralphio))
fake_abandon_signal = chair_comment.copy()
sign_comment(fake_abandon_signal, jeanralphio, sign_comment_id=True)
self.assertFalse(is_comment_signed_by_channel(fake_abandon_signal, rswanson, sign_comment_id=True))
self.assertFalse(is_comment_signed_by_channel(fake_abandon_signal, jeanralphio, sign_comment_id=True))
| 49.533333 | 118 | 0.729475 |
4a2733ebc1c50a3d2c597e89804c314bfac7c68e | 2,141 | py | Python | download-transcripts.py | zuhairmahd/speech-to-text-viewer | 34704c91ac3b5628b402dd85817455f50284ca68 | [
"CC0-1.0"
] | 10 | 2019-08-09T21:01:39.000Z | 2021-08-13T16:41:00.000Z | download-transcripts.py | zuhairmahd/speech-to-text-viewer | 34704c91ac3b5628b402dd85817455f50284ca68 | [
"CC0-1.0"
] | null | null | null | download-transcripts.py | zuhairmahd/speech-to-text-viewer | 34704c91ac3b5628b402dd85817455f50284ca68 | [
"CC0-1.0"
] | 4 | 2020-06-09T13:51:16.000Z | 2021-12-11T22:25:23.000Z | #!/usr/bin/env python -u
"""Download completed AWS Transcribe jobs"""
import json
import os
import sys
import time
import boto3
import botocore
import requests
transcribe = boto3.client("transcribe")
def get_completed_jobs():
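    """Yield the names of all COMPLETED transcription jobs, following
    pagination and backing off for 5 seconds when throttled."""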
next_token = ""
# boto3 will throw an error if you provide an empty NextToken but it doesn't
# have a no-value placeholder so we'll abuse kwargs:
kwargs = {}
while True:
try:
response = transcribe.list_transcription_jobs(Status="COMPLETED", **kwargs)
except botocore.exceptions.ClientError as exc:
if exc.response["Error"]["Code"] == "ThrottlingException":
print("Rate-limiting encountered; will retry in 5 seconds…")
time.sleep(5)
continue
else:
print("Error while listing jobs:", exc, file=sys.stderr)
raise
for summary in response["TranscriptionJobSummaries"]:
yield summary["TranscriptionJobName"]
next_token = response.get("NextToken")
if not next_token:
break
else:
kwargs["NextToken"] = next_token
def download_completed_jobs(results_directory):
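    """Download the transcript JSON for every completed job into
    results_directory as <job_name>.json, skipping jobs already on disk."""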
for job_name in get_completed_jobs():
output_name = os.path.join(results_directory, "%s.json" % job_name)
if os.path.exists(output_name):
continue
results = transcribe.get_transcription_job(TranscriptionJobName=job_name)
transcript_url = results["TranscriptionJob"]["Transcript"]["TranscriptFileUri"]
print(f"Retrieving {job_name}")
resp = requests.get(transcript_url)
if not resp.ok:
print(
f"{job_name}: HTTP {resp.status_code} {resp.reason} {transcript_url}",
file=sys.stderr,
)
continue
with open(output_name, "w+") as output_file:
json.dump(resp.json(), output_file)
if __name__ == "__main__":
# FIXME: add some command-line parsing:
base_dir = os.path.realpath("results")
os.makedirs(base_dir, exist_ok=True)
download_completed_jobs(base_dir)
| 29.328767 | 87 | 0.630079 |
4a27343a751d2271f72978c0285fac2d2e0bfc4c | 2,905 | py | Python | surveyweights/census/uk_census.py | rethinkpriorities/surveyweights | 9023e69d9721d0d00c5081f1e2c23d8befa63ebb | [
"MIT"
] | 1 | 2020-10-17T03:55:58.000Z | 2020-10-17T03:55:58.000Z | surveyweights/census/uk_census.py | rethinkpriorities/surveyweights | 9023e69d9721d0d00c5081f1e2c23d8befa63ebb | [
"MIT"
] | null | null | null | surveyweights/census/uk_census.py | rethinkpriorities/surveyweights | 9023e69d9721d0d00c5081f1e2c23d8befa63ebb | [
"MIT"
] | null | null | null | UK_CENSUS = {'age': {'18-24': 0.094,
'25-29': 0.068,
'30-34': 0.066,
'35-39': 0.067,
'40-44': 0.073,
'45-49': 0.073,
'50-54': 0.064,
'55-59': 0.057,
'60-64': 0.060,
'65-69': 0.048,
'70-74': 0.039,
'75+': 0.078}, # https://www.ethnicity-facts-figures.service.gov.uk/uk-population-by-ethnicity/demographics/age-groups/latest
'education': {'No qualifications': 0.23,
'Level 1 qualifications': 0.14,
'Level 2 qualifications': 0.15,
'Apprenticeship': 0.03,
'Level 3 qualifications': 0.12,
'Level 4 qualifications and above': 0.27,
'Other qualifications': 0.06}, # https://www.ons.gov.uk/employmentandlabourmarket/peopleinwork/employmentandemployeetypes/bulletins/keystatisticsandquickstatisticsforlocalauthoritiesintheunitedkingdom/2013-12-04
'region': {'North East': 0.04,
'North West': 0.1098,
'Yorkshire And The Humber': 0.0824,
'East Midlands': 0.0725,
'West Midlands': 0.0889,
'East': 0.0935,
'London': 0.1342,
'South East': 0.1374,
'South West': 0.0844,
'Wales': 0.0472,
'Scotland': 0.0815,
'Northern Ireland': 0.0283},
'gender': {'Female': 0.507,
'Male': 0.487,
'Other': 0.006}, # Male-Female from https://www.ethnicity-facts-figures.service.gov.uk/uk-population-by-ethnicity/demographics/male-and-female-populations/latest, other from https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5227946/
'income': {'Less than £15,000': 0.12,
'£15,000 to £20,000': 0.21,
'£20,000 and £30,000': 0.29,
'£30,000 to £50,000': 0.24,
'£50,000 to £70,000': 0.07,
'£70,000 to £100,000': 0.04,
'More than £100,000': 0.03},
'vote_brexit': {'Remain': 0.4811,
'Leave': 0.5189}, # https://en.wikipedia.org/wiki/2016_United_Kingdom_European_Union_membership_referendum
'vote2019': {'Conservative': 0.436,
'Labour': 0.321,
'SNP': 0.039,
'Lib Dem': 0.116,
'Other': 0.088}} # 2019 UK election popular vote as recorded by Wikipedia https://en.wikipedia.org/wiki/2019_United_Kingdom_general_election
| 52.818182 | 251 | 0.449914 |
4a2734aab09f9183d07c3795710f60663bdc55ad | 1,098 | py | Python | setup.py | leixin/k2 | 4eb3a9a81754e85325caad5d5c7e2bdc0b8f26e6 | [
"MIT"
] | 144 | 2020-04-17T10:10:57.000Z | 2022-03-25T19:07:54.000Z | setup.py | leixin/k2 | 4eb3a9a81754e85325caad5d5c7e2bdc0b8f26e6 | [
"MIT"
] | 136 | 2020-04-22T10:35:10.000Z | 2021-08-16T13:49:29.000Z | setup.py | leixin/k2 | 4eb3a9a81754e85325caad5d5c7e2bdc0b8f26e6 | [
"MIT"
] | 26 | 2020-04-21T08:23:06.000Z | 2021-09-02T15:23:53.000Z | import setuptools
def get_long_description():
with open('README.md', 'r') as f:
long_description = f.read()
return long_description
version = '0.0.1'
description = 'FSA/FST algorithms, intended to (eventually) be interoperable with PyTorch and similar'
setuptools.setup(
python_requires='>=3.6',
name='k2',
version=version,
author='Daniel Povey',
author_email='[email protected]',
description=description,
keywords='k2, FSA, FST',
long_description=get_long_description(),
long_description_content_type='text/markdown',
url='https://github.com/k2-fsa/k2',
package_dir={'': 'k2/python'},
packages=['k2'],
install_requires=['torch', 'graphviz'],
data_files=[('', ['LICENSE'])],
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3',
'Programming Language :: C++',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Operating System :: OS Independent',
],
)
| 29.675676 | 102 | 0.642987 |
4a27359ef256adb947029d5457a2012ad4c28222 | 5,464 | py | Python | pyATMO/Utils.py | tomevans/pyATMO | dbd561bf2b1bfdbcde8c2af3e2bfdf2baebad8f6 | [
"MIT"
] | null | null | null | pyATMO/Utils.py | tomevans/pyATMO | dbd561bf2b1bfdbcde8c2af3e2bfdf2baebad8f6 | [
"MIT"
] | null | null | null | pyATMO/Utils.py | tomevans/pyATMO | dbd561bf2b1bfdbcde8c2af3e2bfdf2baebad8f6 | [
"MIT"
] | null | null | null | from __future__ import print_function
import scipy.io
import numpy as np
import matplotlib.pyplot as plt
import pdb, os, sys
def ReadChem( ATMO, ncdf_fpath='' ):
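    """
    Reads an ATMO chemistry file and stores the result in ATMO.Chem, a
    dictionary holding the per-molecule 'abundance' arrays and the
    corresponding 'pressure_bar' grid.
    """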
ncdfFile = scipy.io.netcdf.netcdf_file( ncdf_fpath, mode='r', mmap=False )
z = ncdfFile.variables
molname = []
for i in z['molname'][:]:
molnamei = ''
for j in i:
molnamei += j.decode( 'UTF-8' )
molname += [ molnamei.replace( ' ', '' ) ]
abundance = {}
n = len( molname )
for i in range( n ):
k = molname[i]
abundance[k] = z['abundances'][:][i,:]
#ATMO.Chem = { 'molname':molname, 'pressure_bar':z['pressure'][:]/1e6, \
# 'abundance':z['abundances'][:] }
ATMO.Chem = { 'abundance':abundance, 'pressure_bar':z['pressure'][:]/1e6 }
return None
def ReadPT( ATMO, ncdf_fpath='' ):
"""
Reads an ATMO pressure-temperature profile and stores it in an Nx2
array, where the first column is pressure in bar and the second
column is temperature in Kelvin.
"""
ncdfFile = scipy.io.netcdf.netcdf_file( ncdf_fpath, mode='r', mmap=False )
z = ncdfFile.variables
ATMO.PT = np.column_stack( [ z['pressure'][:]/1e6, z['temperature'][:] ] )
return None
def PlotPT( ATMO, ofigpath='' ):
if hasattr( ATMO, 'PT' ):
fig = plt.figure( figsize=[12,12] )
xlow = 0.1
ylow = 0.1
axw = 0.85
axh = 0.85
ax = fig.add_axes( [ xlow, ylow, axw, axh ] )
lw = 2
c = 'r'
ax.plot( ATMO.PT[:,1], ATMO.PT[:,0], '-', lw=lw, c=c )
ax.set_xscale( 'linear' )
ax.set_yscale( 'log' )
ax.set_ylim( [ ATMO.PT[:,0].max(), ATMO.PT[:,0].min() ] )
fig.savefig( ofigpath )
print( '\nSaved PT figure: {0}\n'.format( ofigpath ) )
else:
print( '\nPT attribute does not exist. Run ReadPT() first.\n' )
return None
def ReadTransmissionModel( ATMO, ncdf_fpath='' ):
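    """
    Reads an ATMO transmission spectrum and stores it in an Nx2 array, where
    the first column is wavelength in micron (sorted ascending) and the
    second column is the planet-to-star radius ratio RpRs.
    """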
ncdfFile = scipy.io.netcdf.netcdf_file( ncdf_fpath, mode='r', mmap=False )
z = ncdfFile.variables
nu = z['nu'][:]
RpRs = z['transit_radius'][:]
wav_micron = (1e4)*( 1./nu )
ixs = np.argsort( wav_micron )
ATMO.TransmissionModel = np.column_stack( [ wav_micron[ixs], RpRs[ixs] ] )
return None
def ReadEmissionModel( ATMO, ncdf_fpath='' ):
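    """
    Reads an ATMO emission spectrum and stores the wavenumber/wavelength
    grids, the planetary and stellar fluxes, and the top-of-atmosphere
    planet-to-star flux ratio (RpTOA/Rs)^2 * (fnu_p/fnu_s) as attributes.
    """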
ncdfFile = scipy.io.netcdf.netcdf_file( ncdf_fpath, mode='r', mmap=False )
z = ncdfFile.variables
nu = z['nu'][:]
nubandmin = z['nubandmin'][:]
nubandmax = z['nubandmax'][:]
fnu_p = z['fnu'][:]
fnu_s = z['fnu_star'][:]
RpTOA = z['R_planet_TOA'].data
Rs = z['R_star'].data
FpFsTOA = ( ( RpTOA/Rs )**2. )*( fnu_p/fnu_s )
wav_micron = (1e4)*( 1./nu )
ixs = np.argsort( wav_micron )
ATMO.EmissionModelNuCGS = nu[ixs]
ATMO.EmissionModelNuBandMin_cgs = nubandmin
ATMO.EmissionModelNuBandMax_cgs = nubandmax
ATMO.EmissionModelWavMicron = wav_micron[ixs]
ATMO.EmissionModelFpFsTOA = FpFsTOA[ixs]
ATMO.EmissionModelFpnu = fnu_p[ixs]
ATMO.EmissionModelRpTOA_cgs = RpTOA
ATMO.EmissionModelFsnu = fnu_s[ixs]
ATMO.EmissionModelRs_cgs = Rs
return None
def ComputeOpacities( ATMO, species=[ 'H2O', 'CO', 'CH4' ], odir='.' ):
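    """
    Runs ATMO once per species with only that species' opacity switched on,
    writing one opacity.<species>.ncdf file to odir and plotting each
    resulting transmission model as a quick check.
    """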
    freeze = dict( ATMO.__dict__ )  # snapshot of the attributes so the restore loop below actually reverts changes
species = np.array( species )
n = len( species )
ATMO.nkap = 1
ATMO.scatter = False
print( '\nSaving opacities:' )
for i in range( n ):
ATMO.opacity = [ species[i] ]
ofilename = 'opacity.{0}.ncdf'.format( species[i] )
ATMO.ftrans_spec = os.path.join( odir, ofilename )
ATMO.RunATMO()
print( ATMO.infile_path )
print( '{0}'.format( ATMO.ftrans_spec ) )
# Testing below here:
ATMO.ReadTransmissionModel( ncdf_fpath=ATMO.ftrans_spec )
x = ATMO.TransmissionModel[:,0]
y = ATMO.TransmissionModel[:,1]
plt.figure()
plt.plot(x,y,'-')
plt.title( species[i] )
plt.show()
for key in freeze.keys():
ATMO.__dict__[key] = freeze[key]
return None
def PlotTransmissionModel( ATMO, ofigpath='', xscale='log', yscale='linear' ):
# TODO adapt this from pt profile plotting
plt.ion()
if hasattr( ATMO, 'TransmissionModel' ):
fig = plt.figure( figsize=[14,12] )
vbuff = 0.05
axw = 0.85
axh = ( 1-3*vbuff )/2.
xlow = 0.1
ylow1 = 1 - vbuff - axh
ylow2 = ylow1 - vbuff - axh
ax1 = fig.add_axes( [ xlow, ylow1, axw, axh ] )
ax2 = fig.add_axes( [ xlow, ylow2, axw, axh ] )
lw = 2
c = 'r'
wav_micron = ATMO.TransmissionModel[:,0]
RpRs = ATMO.TransmissionModel[:,1]
axs = [ ax1, ax2 ]
for i in range( 2 ):
axs[i].plot( wav_micron, RpRs, '-', lw=lw, c=c )
axs[i].set_xscale( xscale )
axs[i].set_yscale( yscale )
dRpRs = RpRs.max() - RpRs.min()
x1a = 0.26
x1b = 10.0
x2a = 10.0
x2b = 30.0
ax1.set_xlim( [ x1a, x1b ] )
ax2.set_xlim( [ x2a, x2b ] )
ax1.set_ylim( [ RpRs.min()-0.15*dRpRs, RpRs.max()+0.15*dRpRs ] )
ax2.set_ylim( [ RpRs.min()-0.15*dRpRs, RpRs.max()+0.15*dRpRs ] )
fig.savefig( ofigpath )
print( '\nSaved TransmissionModel figure: {0}\n'.format( ofigpath ) )
else:
print( '\nTransmissionModel attribute does not exist. Run ReadTransmissionModel() first.\n' )
return None
| 33.317073 | 101 | 0.571559 |
4a27361a9244ad3a07fdd0ee8cafe8b54c444a9f | 833 | py | Python | jq/works/migrations/0005_vote.py | Thousif-S/J-Q | 478d69181bf22f23b56c75d50c212d9d4aa0b1a9 | [
"MIT"
] | null | null | null | jq/works/migrations/0005_vote.py | Thousif-S/J-Q | 478d69181bf22f23b56c75d50c212d9d4aa0b1a9 | [
"MIT"
] | 18 | 2020-02-12T01:05:41.000Z | 2022-03-11T23:58:14.000Z | jq/works/migrations/0005_vote.py | Thousif-S/J-Q | 478d69181bf22f23b56c75d50c212d9d4aa0b1a9 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.3 on 2019-07-20 10:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('works', '0004_basework_posted_by'),
]
operations = [
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='works.Quest')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 33.32 | 130 | 0.65066 |
4a27367e79c2de8680f65235f9d062ab604d5f78 | 6,548 | py | Python | src/olympia/reviewers/management/commands/review_reports.py | snifhex/addons-server | 2b9dee65c10c0dca700ff2d25f3694c7cf769816 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/reviewers/management/commands/review_reports.py | snifhex/addons-server | 2b9dee65c10c0dca700ff2d25f3694c7cf769816 | [
"BSD-3-Clause"
] | 1,050 | 2020-07-02T11:48:43.000Z | 2022-03-31T08:11:54.000Z | src/olympia/reviewers/management/commands/review_reports.py | snifhex/addons-server | 2b9dee65c10c0dca700ff2d25f3694c7cf769816 | [
"BSD-3-Clause"
] | 1 | 2019-07-16T00:23:07.000Z | 2019-07-16T00:23:07.000Z | from datetime import date, timedelta
import os
import settings
from django.core.management.base import BaseCommand
from django.db import connection
from django.utils.encoding import force_str
import olympia.core.logger
from olympia.amo.utils import send_mail
from olympia.constants.reviewers import (
POST_REVIEW_WEIGHT_HIGHEST_RISK,
POST_REVIEW_WEIGHT_HIGH_RISK,
POST_REVIEW_WEIGHT_MEDIUM_RISK,
)
from premailer import transform
SQL_DIR = os.path.join(
settings.ROOT, 'src/olympia/reviewers/management/commands/review_reports_sql/'
)
REPORTS = {
'addon': [
(
'Weekly Add-on Reviews, 5 Reviews or More',
os.path.join(SQL_DIR, 'addon/weekly.sql'),
),
(
'Weekly Volunteer Contribution Ratio',
os.path.join(SQL_DIR, 'addon/breakdown.sql'),
),
(
'Weekly Add-on Reviews by Risk Profiles',
os.path.join(SQL_DIR, 'addon/risk.sql'),
),
('Quarterly contributions', os.path.join(SQL_DIR, 'addon/quarterly.sql')),
],
'content': [
(
'Weekly Content Reviews, 10 Reviews or More',
os.path.join(SQL_DIR, 'content/weekly.sql'),
),
(
'Weekly Volunteer Contribution Ratio',
os.path.join(SQL_DIR, 'content/breakdown.sql'),
),
('Quarterly contributions', os.path.join(SQL_DIR, 'content/quarterly.sql')),
],
}
log = olympia.core.logger.getLogger('z.reviewers.review_report')
class Command(BaseCommand):
help = 'Generate and send the review report'
def handle(self, *args, **options):
log.info('Generating add-on reviews report...')
addon_report_data = self.fetch_report_data('addon')
addon_report_html = self.generate_report_html('addon', addon_report_data)
addon_report_subject = '{} {}-{}'.format(
'Weekly Add-on Reviews Report',
self.week_begin,
self.week_end,
)
self.mail_report(
'[email protected]', addon_report_subject, addon_report_html
)
log.info('Generating content reviews report...')
content_report_data = self.fetch_report_data('content')
content_report_html = self.generate_report_html('content', content_report_data)
content_report_subject = '{} {}-{}'.format(
'Weekly Add-on Content Reviews Report',
self.week_begin,
self.week_end,
)
self.mail_report(
'[email protected]',
content_report_subject,
content_report_html,
)
def fetch_report_data(self, group):
today = date.today()
with connection.cursor() as cursor:
# Set variables that are being used in the review report,
# as well as the email output.
cursor.execute(
"""
SET @WEEK_BEGIN=%s;
SET @WEEK_END=%s;
SET @QUARTER_BEGIN=%s;
SET @RISK_HIGHEST=%s;
SET @RISK_HIGH=%s;
SET @RISK_MEDIUM=%s;
""",
[
today - timedelta(days=today.weekday() + 7),
today - timedelta(days=today.weekday() + 1),
date(today.year, (today.month - 1) // 3 * 3 + 1, 1),
POST_REVIEW_WEIGHT_HIGHEST_RISK,
POST_REVIEW_WEIGHT_HIGH_RISK,
POST_REVIEW_WEIGHT_MEDIUM_RISK,
],
)
# Read the beginning/end of the week
# in order to put it in the email.
cursor.execute('SELECT @WEEK_BEGIN, @WEEK_END;')
data = cursor.fetchone()
self.week_begin = data[0]
self.week_end = data[1]
report_data = []
for header, query_file in REPORTS.get(group):
with open(query_file) as report_query:
query_string = report_query.read().replace('\n', ' ')
cursor.execute(query_string)
table_header = []
for descr in cursor.description:
table_header.append(descr[0])
table_content = cursor.fetchall()
table_content = tuple(
tuple(force_str(item) for item in row) for row in table_content
)
report_data.append((header, table_header, table_content))
return report_data
def generate_report_html(self, group, report_data):
# Pre-set email with style information and header
all_html = """
<style>
h1 {{ margin: 0; padding: 0; }}
h2 {{ margin: 0; padding: 30px 0 10px 0; }}
th {{ text-align: left; }}
th, td {{ padding: 0 12px; }}
td {{ text-align: right; white-space: nowrap; }}
td:first-child {{ text-align: left; white-space: nowrap; }}
</style>
<h1>Weekly Add-on {}Reviews Report</h1>
<h3>{} - {}</h3>
""".format(
('Content ' if group == 'content' else ''),
self.week_begin,
self.week_end,
)
# For each group, execute the individual SQL reports
# and build the HTML email.
for section in report_data:
all_html += '<h2>%s</h2>\n' % section[0]
table_html = '<table>\n'
table_html += (
'<tr><th>'
+ '</th><th>'.join([header for header in section[1]])
+ '</th></tr>\n'
)
for row in section[2]:
table_html += (
'<tr><td>'
+ '</td><td>'.join([entry for entry in row])
+ '</td></tr>\n'
)
table_html += '</table>\n'
all_html += table_html
# Some email clients (e.g. GMail) require all styles to be inline.
# 'transform' takes the file-wide styles defined above and transforms
# them to be inline styles.
return transform(all_html)
def mail_report(self, recipient, subject, content):
log.info(f"Sending report '{subject}' to {recipient}.")
send_mail(
subject,
content,
from_email='[email protected]',
recipient_list=[recipient],
html_message=content,
reply_to=[recipient],
)
| 33.927461 | 87 | 0.539707 |
4a2737c93ef25b6490d45a897b8011932cba9aa7 | 995 | py | Python | 70_question/dynamic_programming/make_change.py | alvinctk/google-tech-dev-guide | 9d7759bea1f44673c2de4f25a94b27368928a59f | [
"Apache-2.0"
] | 26 | 2019-06-07T05:29:47.000Z | 2022-03-19T15:32:27.000Z | 70_question/dynamic_programming/make_change.py | alvinctk/google-tech-dev-guide | 9d7759bea1f44673c2de4f25a94b27368928a59f | [
"Apache-2.0"
] | null | null | null | 70_question/dynamic_programming/make_change.py | alvinctk/google-tech-dev-guide | 9d7759bea1f44673c2de4f25a94b27368928a59f | [
"Apache-2.0"
] | 6 | 2019-10-10T06:39:28.000Z | 2020-05-12T19:50:55.000Z | # O(nd) time | O(n) space
# where d is the number of denominations
def numberOfWaysToMakeChange(n, denoms):
"""
ways[amount] += ways[amount - denomination]
ways[amount] = ways[amount - denomination] + ways[amount]
for amount - denomination >= 0
    base case: ways[0] = 1 (there is exactly one way to make amount 0: use no coins)
"""
ways = [0] * (n + 1)
    # Base case: there is exactly one way to make amount 0 (use no coins).
ways[0] = 1
for coin in denoms:
amount = coin
while amount < len(ways):
# Since we set amount = coin, amount - coin is always equal
# or greater than zero. Otherwise, check if amount - coin >= 0
ways[amount] = ways[amount] + ways[amount-coin]
amount += 1
# print("Ways (i=amount) = {}".format(ways))
print("Number of ways to change {} in terms of {} = {}".format(n, denoms, ways[n]))
return ways[n]
if __name__ == "__main__":
numberOfWaysToMakeChange(7, [2, 3, 4, 7])
| 33.166667 | 87 | 0.6 |
4a2737cc3ca162fb2f77c9be4f04f4a9ec65e9d5 | 284 | py | Python | src/utils/game_mode.py | paolodelia99/py-pacman | d58cc7429f19ab5189a17ce209cc467bd2db72a9 | [
"MIT"
] | 4 | 2021-03-15T16:28:01.000Z | 2022-03-07T21:41:15.000Z | src/utils/game_mode.py | paolodelia99/py-pacman | d58cc7429f19ab5189a17ce209cc467bd2db72a9 | [
"MIT"
] | null | null | null | src/utils/game_mode.py | paolodelia99/py-pacman | d58cc7429f19ab5189a17ce209cc467bd2db72a9 | [
"MIT"
] | null | null | null | from enum import Enum
class GameMode(Enum):
ready = 0
normal = 1
hit_ghost = 2
game_over = 3
wait_to_start = 4
wait_after_eating_ghost = 5
wait_after_finishing_level = 6
flash_maze = 7
extra_pacman = 8
change_ghosts = 9
black_screen = 10
| 17.75 | 34 | 0.651408 |
4a27384da8a1c3fd4dd173b652e8a625977fc7df | 7,784 | py | Python | tensorflow/python/kernel_tests/segment_reduction_ops_deterministic_test.py | ashutom/tensorflow-upstream | c16069c19de9e286dd664abb78d0ea421e9f32d4 | [
"Apache-2.0"
] | 10 | 2021-05-25T17:43:04.000Z | 2022-03-08T10:46:09.000Z | tensorflow/python/kernel_tests/segment_reduction_ops_deterministic_test.py | CaptainGizzy21/tensorflow | 3457a2b122e50b4d44ceaaed5a663d635e5c22df | [
"Apache-2.0"
] | 6 | 2021-11-10T20:16:36.000Z | 2022-02-10T05:52:17.000Z | tensorflow/python/kernel_tests/segment_reduction_ops_deterministic_test.py | CaptainGizzy21/tensorflow | 3457a2b122e50b4d44ceaaed5a663d635e5c22df | [
"Apache-2.0"
] | 3 | 2021-09-26T22:20:25.000Z | 2021-09-26T23:07:13.000Z | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for deterministic functionality of segment reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class SegmentReductionDeterminismExceptionsTest(test.TestCase):
"""Test d9m-unimplemented exceptions from the segment reduction ops.
Test that tf.errors.UnimplementedError is thrown or not thrown, as
appropriate, by the GPU code-paths for segment reduction ops when
deterministic ops are enabled.
This test assumes that the base op test runs all the same test cases when
deterministic ops are not enabled and will therefore detect erroneous
exception throwing in those cases.
"""
def _input(self, data_type, segment_ids_type):
data = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=data_type)
segment_ids = constant_op.constant([0, 1], dtype=segment_ids_type)
num_segments = 2
return data, segment_ids, num_segments
@test_util.run_cuda_only
def testSortedOps(self):
op_should_throw_for_float = {
math_ops.segment_max: False,
math_ops.segment_min: False,
math_ops.segment_prod: True,
math_ops.segment_sum: True,
}
for op, should_throw_for_float in op_should_throw_for_float.items():
for segment_ids_type in [dtypes.int32, dtypes.int64]:
for data_type in [dtypes.float16, dtypes.float32, dtypes.float64]:
with self.cached_session(force_gpu=True):
data, segment_ids, _ = self._input(data_type, segment_ids_type)
if should_throw_for_float:
with self.assertRaisesRegex(
errors_impl.UnimplementedError,
"Deterministic GPU implementation of sorted segment " +
"reduction op not available."):
result = op(data, segment_ids)
self.evaluate(result)
else:
result = op(data, segment_ids)
self.evaluate(result)
_UNSORTED_ERROR_MESSAGE = ("Deterministic GPU implementation of unsorted " +
"segment reduction op not available.")
@test_util.run_cuda_only
@test_util.run_in_graph_and_eager_modes
def testUnsortedOps(self):
op_should_throw_for_float = {
math_ops.unsorted_segment_max: False,
math_ops.unsorted_segment_min: False,
math_ops.unsorted_segment_mean: True, # uses unsorted_segment_sum
math_ops.unsorted_segment_sqrt_n: True, # uses unsorted_segment_sum
math_ops.unsorted_segment_prod: True,
math_ops.unsorted_segment_sum: True,
}
with self.session(force_gpu=True):
for op, should_throw_for_float in op_should_throw_for_float.items():
for segment_ids_type in [dtypes.int32, dtypes.int64]:
for data_type in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32
]:
if (op == math_ops.unsorted_segment_sqrt_n and
data_type == dtypes.int32): # sqrt_n doesn't support int32
continue
data, segment_ids, num_segments = self._input(
data_type, segment_ids_type)
if (data_type != dtypes.int32) and should_throw_for_float:
with self.assertRaisesRegex(errors_impl.UnimplementedError,
self._UNSORTED_ERROR_MESSAGE):
result = op(data, segment_ids, num_segments)
self.evaluate(result)
else:
result = op(data, segment_ids, num_segments)
self.evaluate(result)
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message="No ROCm support for complex types in segment reduction ops")
@test_util.run_cuda_only
def testUnsortedOpsComplex(self):
for op in [
math_ops.unsorted_segment_sum,
]:
for data_type in [dtypes.complex64, dtypes.complex128]:
for segment_ids_type in [dtypes.int32, dtypes.int64]:
with self.cached_session(force_gpu=True):
data, segment_ids, num_segments = self._input(
data_type, segment_ids_type)
with self.assertRaisesRegex(errors_impl.UnimplementedError,
self._UNSORTED_ERROR_MESSAGE):
result = op(data, segment_ids, num_segments)
self.evaluate(result)
@test_util.run_cuda_only
@test_util.run_in_graph_and_eager_modes
def testConvertToTensor(self):
with self.session(force_gpu=True):
dtypes_to_test = [dtypes.float16, dtypes.float32, dtypes.float64]
if not test.is_built_with_rocm():
dtypes_to_test += [dtypes.complex64, dtypes.complex128]
for data_type in dtypes_to_test:
for segment_ids_type in [dtypes.int32, dtypes.int64]:
values, indices, _ = self._input(data_type, segment_ids_type)
sparse_value = indexed_slices.IndexedSlices(
values, indices, dense_shape=values.shape)
with self.assertRaisesRegex(errors_impl.UnimplementedError,
self._UNSORTED_ERROR_MESSAGE):
# convert_to_tensor with IndexedSlices uses unsorted_segment_sum
result = ops.convert_to_tensor(sparse_value)
self.evaluate(result)
@test_util.run_cuda_only
def testGatherBackprop(self):
dtypes_to_test = [dtypes.float16, dtypes.float32, dtypes.float64]
if not test.is_built_with_rocm():
dtypes_to_test += [dtypes.complex64, dtypes.complex128]
for data_type in dtypes_to_test:
for segment_ids_type in [dtypes.int32, dtypes.int64]:
with self.cached_session(force_gpu=True):
params, indices, _ = self._input(data_type, segment_ids_type)
params = variables.Variable(params)
with backprop.GradientTape() as tape:
tape.watch(params)
op_output = array_ops.gather(params, indices)
gradient = tape.gradient(op_output, params)
with self.assertRaisesRegex(errors_impl.UnimplementedError,
self._UNSORTED_ERROR_MESSAGE):
# convert_to_tensor on IndexedSlices
self.evaluate(params.assign(gradient))
if __name__ == "__main__":
# Note that the effect of setting the following environment variable to
# 'true' is not tested. Unless we can find a simpler pattern for testing these
# environment variables, it would require this file to be made into a base
# and then two more test files to be created.
os.environ["TF_DETERMINISTIC_OPS"] = "1"
test.main()
| 44.48 | 80 | 0.68795 |
4a273ac709f58bd4e7ab13520471452889ff0f16 | 1,123 | py | Python | test/test_country_details.py | Cloudmersive/Cloudmersive.APIClient.Python.Validate | 894a3f578c3860db41b3eed179dcc52e02f565a0 | [
"Apache-2.0"
] | 3 | 2018-06-23T21:37:21.000Z | 2020-04-20T23:07:36.000Z | test/test_country_details.py | Cloudmersive/Cloudmersive.APIClient.Python.Validate | 894a3f578c3860db41b3eed179dcc52e02f565a0 | [
"Apache-2.0"
] | 1 | 2019-02-04T17:03:35.000Z | 2019-03-02T20:16:52.000Z | test/test_country_details.py | Cloudmersive/Cloudmersive.APIClient.Python.Validate | 894a3f578c3860db41b3eed179dcc52e02f565a0 | [
"Apache-2.0"
] | 2 | 2019-03-21T15:54:15.000Z | 2020-05-27T17:30:43.000Z | # coding: utf-8
"""
validateapi
The validation APIs help you validate data. Check if an E-mail address is real. Check if a domain is real. Check up on an IP address, and even where it is located. All this and much more is available in the validation API. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudmersive_validate_api_client
from cloudmersive_validate_api_client.models.country_details import CountryDetails # noqa: E501
from cloudmersive_validate_api_client.rest import ApiException
class TestCountryDetails(unittest.TestCase):
"""CountryDetails unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCountryDetails(self):
"""Test CountryDetails"""
# FIXME: construct object with mandatory attributes with example values
# model = cloudmersive_validate_api_client.models.country_details.CountryDetails() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 27.390244 | 240 | 0.731968 |
4a273b3c39bbc15b578413c52fc0111332235426 | 8,167 | py | Python | python/maintenance.py | jpiercefield/CattleTrakcer | a1fb93a8c2f1072601bcd9871c5928610cbbaf88 | [
"MIT"
] | 1 | 2017-06-10T19:17:11.000Z | 2017-06-10T19:17:11.000Z | python/maintenance.py | jpiercefield/CattleTrakcer | a1fb93a8c2f1072601bcd9871c5928610cbbaf88 | [
"MIT"
] | null | null | null | python/maintenance.py | jpiercefield/CattleTrakcer | a1fb93a8c2f1072601bcd9871c5928610cbbaf88 | [
"MIT"
] | 2 | 2017-03-21T01:41:53.000Z | 2018-12-03T08:38:06.000Z | from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
import smtplib
import time
import datetime
from sqlalchemy import create_engine, Column, Integer, String, DateTime, Binary
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import exists
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.util import has_identity
from sqlalchemy import exc
from sqlalchemy import text
engine = create_engine('mysql://root:CattleTrax11!@localhost:3306/cattletrax', echo=False)
Base = declarative_base(engine)
Session = sessionmaker(bind=engine, autoflush=True, autocommit=False)
class Feeder(Base):
__tablename__='feeder'
ref_id = Column(Integer, primary_key=True)
num_visits = Column(Integer)
last_visit_date = Column("last_visit_date", DateTime)
class Cow(Base):
__tablename__='cow'
cow_id = Column(Integer, primary_key=True)
due_date = Column(DateTime)
first_year = Column(Binary)
pregnant = Column(Binary)
calving_cond = Column(String)
calving_ease = Column(Integer)
calf_bonding = Column(Integer)
class Animal(Base):
__tablename__='animal'
animal_id = Column(Integer,primary_key=True)
tag_id = Column(Integer)
herd_id = Column(Integer)
pasture_id = Column(Integer)
farm_id = Column(Integer)
a_type = Column(String)
class t1(Base):
__tablename__='t1'
cow_id = Column(Integer,primary_key=True)
tag_id = Column(Integer)
due_date = Column(DateTime)
num = Column(Integer)
future = Column(DateTime)
class t2(Base):
__tablename__ = 't2'
cow_id = Column(Integer,primary_key=True)
tag_id = Column(Integer)
due_date = Column(DateTime)
past = Column(Integer)
class t3(Base):
__tablename__ = 't3'
cow_id = Column(Integer,primary_key=True)
tag_id = Column(Integer)
due_date = Column(DateTime)
past = Column(Integer)
class user(Base):
__tablename__ = 'user'
user_id = Column(Integer, primary_key=True)
email = Column(String)
def createBody():
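    """Build the plain-text body of the daily report: cattle that have not
    visited the feeder in the past 3 days, cows due today, cows due within
    7 days, and cows past their due date."""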
fmt='%H:%M:%S %m-%d-%Y '
feed = []
today = []
preg = []
past_due = []
body = ""
session = Session()
#conn = engine.connect()
stmt = text("SELECT \
animal.tag_id, \
feeder.last_visit_date \
FROM \
feeder \
JOIN \
animal ON feeder.ref_id = animal.rfid \
WHERE \
feeder.last_visit_date <= DATE_ADD(DATE(NOW()), \
INTERVAL -3 DAY)")
stmt = stmt.columns(Animal.tag_id, Feeder.last_visit_date)
    # Iterate over plain result tuples rather than rebinding the mapped Column attributes.
    for tag_id, last_visit_date in session.query(Animal.tag_id, Feeder.last_visit_date).from_statement(stmt):
        next = "Tag: "
        id = str(tag_id)
        next += id
        next += " Last Visit: "
        next += str(last_visit_date.strftime(fmt))
        feed.append(next)
body += "\nCattle that have not visited the feeder in the past 3 days:"
if not feed:
body += "\nNothing!"
else:
for row in feed:
body += "\n"
body += row
stmt3 = text("SELECT \
`tag_id`, \
`due_date`, \
DATEDIFF(DATE(NOW()), `due_date`) AS 'past' \
FROM \
`cow` \
JOIN \
`animal` ON cow.cow_id = animal.animal_id \
WHERE \
`due_date` = DATE(NOW()) AND `pregnant` = 1 \
ORDER BY \
`past` DESC")
stmt3 = stmt3.columns(t3.tag_id, t3.due_date, t3.past)
    for tag_id, due_date, past in session.query(t3.tag_id, t3.due_date, t3.past).from_statement(stmt3):
        next = "Tag: "
        id = str(tag_id)
        next += id
        today.append(next)
body += "\n\nCows due today: "
if not today:
body += "\nNothing!"
else:
for row in today:
body += "\n"
body += row
fmt='%m-%d-%Y'
stmt1 = text("SELECT \
`tag_id`, \
`due_date`, \
`num`\
FROM \
(SELECT \
`cow_id`, \
`tag_id`, \
`due_date`, \
DATEDIFF(`due_date`,DATE(NOW())) AS 'num' \
FROM \
`cow` \
JOIN \
`animal` ON cow.cow_id = animal.animal_id \
WHERE \
`due_date` > DATE(NOW()) AND `due_date` <= (DATE(NOW())+7) AND `pregnant` = 1) \
AS t1 \
ORDER BY \
`num` ASC")
stmt1 = stmt1.columns(t1.tag_id, t1.due_date, t1.num)
    for tag_id, due_date, num in session.query(t1.tag_id, t1.due_date, t1.num).from_statement(stmt1):
        next = "Tag: "
        id = str(tag_id)
        next += id
        next += " Days until Due: "
        next += str(num)
        next += " Due Date: "
        next += str(due_date.strftime(fmt))
        preg.append(next)
body += "\n\nCows due in the next 7 days: "
if not preg:
body += "\nNothing!"
else:
for row in preg:
body += "\n"
body += row
stmt2 = text("SELECT \
`tag_id`, \
`due_date`, \
DATEDIFF(DATE(NOW()), `due_date`) AS 'past' \
FROM \
`cow` \
JOIN \
`animal` ON cow.cow_id = animal.animal_id \
WHERE \
`due_date` < DATE(NOW()) AND `pregnant` = 1 \
ORDER BY \
`past` DESC")
stmt2 = stmt2.columns(t2.tag_id, t2.due_date, t2.past)
    for tag_id, due_date, past in session.query(t2.tag_id, t2.due_date, t2.past).from_statement(stmt2):
        next = "Tag: "
        id = str(tag_id)
        next += id
        next += " Days Past Due: "
        next += str(past)
        next += " Due Date: "
        next += str(due_date.strftime(fmt))
        past_due.append(next)
body += "\n\nCows past due: "
if not past_due:
body += "\nNothing!"
else:
for row in past_due:
body += "\n"
body += row
return body
def sendEmail(bod):
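    """Email the report body to the address of user_id 1 via Gmail SMTP."""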
fromaddr = "[email protected]"
session = Session()
for email in session.query(user.email, user.user_id).filter(user.user_id == 1):
toaddr = str(email.email)
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = "Daily Email"
body = bod
msg.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.ehlo()
server.login("Cattletrackernotifications", "Tracker2017")
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
def main():
body = createBody()
sendEmail(body)
if __name__ == "__main__":
main()
| 33.609053 | 120 | 0.478389 |
4a273bd6174c7d8bc1502225dd5d6a62220ac024 | 9,151 | py | Python | app/api/v1/schema/regdetails.py | a-wakeel/Device-Registration-Subsystem | dd9fa387e2087a6ccea9676303debe640bd99422 | [
"Unlicense"
] | 6 | 2018-11-07T12:41:30.000Z | 2020-04-12T18:07:03.000Z | app/api/v1/schema/regdetails.py | a-wakeel/Device-Registration-Subsystem | dd9fa387e2087a6ccea9676303debe640bd99422 | [
"Unlicense"
] | 1 | 2020-10-20T12:33:18.000Z | 2020-10-20T12:33:18.000Z | app/api/v1/schema/regdetails.py | a-wakeel/Device-Registration-Subsystem | dd9fa387e2087a6ccea9676303debe640bd99422 | [
"Unlicense"
] | 10 | 2018-11-12T06:15:19.000Z | 2021-11-18T05:45:12.000Z | """
DRS Registration schema package.
Copyright (c) 2018-2020 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment is required by displaying the trademark/log as per the details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from marshmallow import Schema, fields, validates, pre_load, pre_dump, post_dump, post_load, validate
from app.api.v1.helpers.validators import *
from app.api.v1.models.devicequota import DeviceQuota
from app.api.v1.models.status import Status
import ast
import pydash
from app import app, GLOBAL_CONF
from flask_babel import gettext as _
class RegistrationDetailsSchema(Schema):
"""Schema for Registration routes."""
id = fields.Int(required=False)
device_count = fields.Int(required=True, error_messages={'required': 'Device count is required'})
reviewer_id = fields.Str(required=False)
reviewer_name = fields.Str(required=False)
report_allowed = fields.Boolean(required=False)
user_id = fields.Str(required=True, error_messages={'required': 'User Id is required'})
user_name = fields.Str(required=True, error_messages={'required': 'User Name is required'})
imei_per_device = fields.Int(required=True, error_messages={'required': 'Imei per device count is required'})
m_location = fields.Str(required=True, error_messages={'required': 'manufacturing location is a required field'})
file = fields.Str(required=False)
file_link = fields.Str()
created_at = fields.DateTime()
updated_at = fields.DateTime()
imeis = fields.List(fields.List(fields.Str(validate=validate_imei)), required=False)
imeis_count = fields.Int(required=False)
status_label = fields.Str(required=False)
# processed = fields.Boolean()
processing_status_label = fields.Str()
report_status_label = fields.Str()
tracking_id = fields.Str()
report = fields.String()
duplicate_imeis_file = fields.String(missing='')
msisdn = fields.Str()
network = fields.Str()
@pre_load()
def file_webpage(self, data):
"""Validates type of input."""
if 'imeis' not in data and 'file' not in data:
raise ValidationError('Either file or webpage input is required',
field_names=['imeis', 'file']
)
elif 'imeis' in data and 'file' in data:
raise ValidationError('Either file or webpage input is required',
field_names=['imeis', 'file']
)
@pre_load()
def convert_imei(self, data):
"""Converts imei to supported formats."""
if 'imeis' in data and 'file' not in data:
try:
data['imeis'] = ast.literal_eval(data.get('imeis'))
except Exception as e:
raise ValidationError('Invalid format for IMEIs Input', field_names=['imeis'])
imeis = pydash.flatten_deep(data['imeis'])
if len(imeis) == 0:
raise ValidationError('Invalid format for IMEIs Input', field_names=['imeis'])
elif not isinstance(data['imeis'][0], list):
raise ValidationError('Invalid format for IMEIs Input', field_names=['imeis'])
elif len(imeis) != len(list(set(imeis))):
raise ValidationError(_('Duplicate IMEIs in request'), field_names=['imeis'])
elif 'device_count' in data and data['device_count'].isdigit():
if int(data['device_count']) > 10:
raise ValidationError('Only 10 device are allowed in case of webpage input',
field_names=['imeis'])
if int(data['device_count']) != len(data['imeis']):
raise ValidationError('Device count should be same as no of devices',
field_names=['device_count'])
if 'imei_per_device' in data and data['imei_per_device'].isdigit():
if int(data['imei_per_device']) > 5:
raise ValidationError('Only 5 imeis are allowed per device in webpage',
field_names=['imei_per_device'])
invalid = list(filter(lambda x: len(x) != int(data['imei_per_device']), data['imeis']))
if len(invalid) > 0:
raise ValidationError('No of imei for each device should be same as imei per device',
field_names=['imei_per_device'])
@pre_dump()
def get_file_link(self, data):
"""Returns downloadable links to the files."""
if not data.imeis:
upload_dir_path = GLOBAL_CONF['upload_directory']
data.file_link = '{server_dir}/{local_dir}/{file_name}'.format(
server_dir=upload_dir_path,
local_dir=data.tracking_id,
file_name=data.file
)
@pre_dump()
def request_status(self, data):
"""Returns current status of the request."""
data.status_label = Status.get_status_type(data.status)
data.processing_status_label = Status.get_status_type(data.processing_status)
data.report_status_label = Status.get_status_type(data.report_status)
@pre_dump()
def convert_imeis(self, data):
"""Convert imeis."""
if data.imeis:
try:
data.imeis = ast.literal_eval(data.imeis)
except:
pass
@pre_load()
def create_device_quota(self, data):
"""Create a new device quotes for the user."""
if 'user_id' in data:
DeviceQuota.get_or_create(data['user_id'], 'importer')
@validates('device_count')
def validate_device_count(self, value):
"""Validates devices count."""
if value <= 0:
raise ValidationError('Device count must be a positive number',
field_names=['device_count'])
if value > 10000000:
raise ValidationError('Device count in single request should be less than 10000000')
@validates('m_location')
def validate_manufacturing_location(self, value):
"""Validates manufacturing localtions."""
locations = ['overseas', 'local']
if value not in locations:
raise ValidationError('Manufacturing location must be either local or overseas',
field_names=['m_location'])
@validates('file')
def validate_filename(self, value):
"""Validates file name."""
if not value.endswith('.tsv'):
raise ValidationError('Only tsv files are allowed', field_names=['file'])
elif len(value) > 100:
raise ValidationError('File name length should be under 100 characters', field_names=['file'])
@validates('user_id')
def validate_user_id(self, value):
"""Validates user id."""
validate_input('user id', value)
@validates('user_name')
def validate_user_name(self, value):
"""Validates user name."""
validate_input('user name', value)
| 55.79878 | 844 | 0.654136 |
4a273c2c165578d953d10066f56f220499d219d0 | 12,791 | py | Python | spritecss/packing/anneal.py | yostudios/Spritemapper | 277cb76a14be639b6d7fa3191bc427409e72ad69 | [
"MIT"
] | 49 | 2015-01-22T14:27:32.000Z | 2021-12-24T23:07:40.000Z | spritecss/packing/anneal.py | tzuryby/Spritemapper | 7cd3b68348a86982420b6231861fda4a0e676f35 | [
"MIT"
] | 2 | 2015-02-12T12:31:34.000Z | 2015-04-12T10:43:17.000Z | spritecss/packing/anneal.py | tzuryby/Spritemapper | 7cd3b68348a86982420b6231861fda4a0e676f35 | [
"MIT"
] | 6 | 2015-04-03T07:29:54.000Z | 2021-12-15T02:21:35.000Z | #!/usr/bin/env python
# Python module for simulated annealing - anneal.py - v1.0 - 2 Sep 2009
#
# Copyright (c) 2009, Richard J. Wagner <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
This module performs simulated annealing to find a state of a system that
minimizes its energy.
An example program demonstrates simulated annealing with a traveling
salesman problem to find the shortest route to visit the twenty largest
cities in the United States.
"""
# How to optimize a system with simulated annealing:
#
# 1) Define a format for describing the state of the system.
#
# 2) Define a function to calculate the energy of a state.
#
# 3) Define a function to make a random change to a state.
#
# 4) Choose a maximum temperature, minimum temperature, and number of steps.
#
# 5) Set the annealer to work with your state and functions.
#
# 6) Study the variation in energy with temperature and duration to find a
# productive annealing schedule.
#
# Or,
#
# 4) Run the automatic annealer which will attempt to choose reasonable values
# for maximum and minimum temperatures and then anneal for the allotted time.
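#
# A minimal sketch of those steps, kept as comments so the module's behavior is
# unchanged. The state, energy and move functions below are made up purely for
# illustration and are not part of this module:
#
#     state = [random.uniform(-1, 1) for _ in range(10)]          # 1) state format
#     def energy(s): return sum(x * x for x in s)                 # 2) energy of a state
#     def move(s): s[random.randrange(len(s))] += random.uniform(-0.1, 0.1)  # 3) random change
#     annealer = Annealer(energy, move)                           # 5) hook up the functions
#     best_state, best_energy = annealer.anneal(state, Tmax=10.0, Tmin=0.01,
#                                               steps=50000, updates=10)     # 4) and 6) schedule
#     # or let auto() estimate a schedule for roughly one minute of annealing:
#     best_state, best_energy = annealer.auto(state, minutes=1.0)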
import copy, math, random, sys, time
def round_figures(x, n):
"""Returns x rounded to n significant figures."""
return round(x, int(n - math.ceil(math.log10(abs(x)))))
def time_string(seconds):
"""Returns time in seconds as a string formatted HHHH:MM:SS."""
s = int(round(seconds)) # round to nearest second
h, s = divmod(s, 3600) # get hours and remainder
m, s = divmod(s, 60) # split remainder into minutes and seconds
return '%4i:%02i:%02i' % (h, m, s)
class Annealer:
"""Performs simulated annealing by calling functions to calculate
energy and make moves on a state. The temperature schedule for
annealing may be provided manually or estimated automatically.
"""
out = sys.stderr
def __init__(self, energy, move):
self.energy = energy # function to calculate energy of a state
self.move = move # function to make a random change to a state
def anneal(self, state, Tmax, Tmin, steps, updates=0):
"""Minimizes the energy of a system by simulated annealing.
Keyword arguments:
state -- an initial arrangement of the system
Tmax -- maximum temperature (in units of energy)
Tmin -- minimum temperature (must be greater than zero)
steps -- the number of steps requested
updates -- the number of updates to print during annealing
Returns the best state and energy found."""
step = 0
start = time.time()
wln = lambda t: self.out.write(t + "\n")
def update(T, E, acceptance, improvement):
"""Prints the current temperature, energy, acceptance rate,
improvement rate, elapsed time, and remaining time.
The acceptance rate indicates the percentage of moves since the last
update that were accepted by the Metropolis algorithm. It includes
moves that decreased the energy, moves that left the energy
unchanged, and moves that increased the energy yet were reached by
thermal excitation.
The improvement rate indicates the percentage of moves since the
last update that strictly decreased the energy. At high
temperatures it will include both moves that improved the overall
state and moves that simply undid previously accepted moves that
increased the energy by thermal excitation. At low temperatures
it will tend toward zero as the moves that can decrease the energy
are exhausted and moves that would increase the energy are no longer
thermally accessible."""
elapsed = time.time() - start
if step == 0:
wln(' Temperature Energy Accept Improve Elapsed Remaining')
wln('%12.2f %12.2f %s ' %
(T, E, time_string(elapsed) ))
else:
remain = ( steps - step ) * ( elapsed / step )
wln('%12.2f %12.2f %7.2f%% %7.2f%% %s %s' %
(T, E, 100.0*acceptance, 100.0*improvement,
time_string(elapsed), time_string(remain)))
# Precompute factor for exponential cooling from Tmax to Tmin
if Tmin <= 0.0:
raise ValueError('exponential cooling requires a minimum '
'temperature greater than zero')
Tfactor = -math.log( float(Tmax) / Tmin )
# Note initial state
T = Tmax
E = self.energy(state)
prevState = copy.deepcopy(state)
prevEnergy = E
bestState = copy.deepcopy(state)
bestEnergy = E
trials, accepts, improves = 0, 0, 0
if updates > 0:
updateWavelength = float(steps) / updates
update(T, E, None, None)
# Attempt moves to new states
while step < steps:
step += 1
T = Tmax * math.exp( Tfactor * step / steps )
self.move(state)
E = self.energy(state)
dE = E - prevEnergy
trials += 1
if dE > 0.0 and math.exp(-dE/T) < random.random():
# Restore previous state
state = copy.deepcopy(prevState)
E = prevEnergy
else:
# Accept new state and compare to best state
accepts += 1
if dE < 0.0:
improves += 1
prevState = copy.deepcopy(state)
prevEnergy = E
if E < bestEnergy:
bestState = copy.deepcopy(state)
bestEnergy = E
if updates > 1:
if step // updateWavelength > (step-1) // updateWavelength:
update(T, E, float(accepts)/trials, float(improves)/trials)
trials, accepts, improves = 0, 0, 0
# Return best state and energy
return bestState, bestEnergy
def auto(self, state, minutes, steps=2000):
"""Minimizes the energy of a system by simulated annealing with
automatic selection of the temperature schedule.
Keyword arguments:
state -- an initial arrangement of the system
minutes -- time to spend annealing (after exploring temperatures)
steps -- number of steps to spend on each stage of exploration
Returns the best state and energy found."""
wln = lambda t: self.out.write(t + "\n")
def run(state, T, steps):
"""Anneals a system at constant temperature and returns the state,
energy, rate of acceptance, and rate of improvement."""
E = self.energy(state)
prevState = copy.deepcopy(state)
prevEnergy = E
accepts, improves = 0, 0
for step in range(steps):
self.move(state)
E = self.energy(state)
dE = E - prevEnergy
if dE > 0.0 and math.exp(-dE/T) < random.random():
state = copy.deepcopy(prevState)
E = prevEnergy
else:
accepts += 1
if dE < 0.0:
improves += 1
prevState = copy.deepcopy(state)
prevEnergy = E
return state, E, float(accepts)/steps, float(improves)/steps
step = 0
start = time.time()
wln('Attempting automatic simulated anneal...')
# Find an initial guess for temperature
T = 0.0
E = self.energy(state)
while T == 0.0:
step += 1
self.move(state)
T = abs( self.energy(state) - E )
wln('Exploring temperature landscape:')
wln(' Temperature Energy Accept Improve Elapsed')
def update(T, E, acceptance, improvement):
"""Prints the current temperature, energy, acceptance rate,
improvement rate, and elapsed time."""
elapsed = time.time() - start
wln('%12.2f %12.2f %7.2f%% %7.2f%% %s' % \
(T, E, 100.0*acceptance, 100.0*improvement, time_string(elapsed)))
# Search for Tmax - a temperature that gives 98% acceptance
state, E, acceptance, improvement = run(state, T, steps)
step += steps
while acceptance > 0.98:
T = round_figures(T/1.5, 2)
state, E, acceptance, improvement = run(state, T, steps)
step += steps
update(T, E, acceptance, improvement)
while acceptance < 0.98:
T = round_figures(T*1.5, 2)
state, E, acceptance, improvement = run(state, T, steps)
step += steps
update(T, E, acceptance, improvement)
Tmax = T
# Search for Tmin - a temperature that gives 0% improvement
while improvement > 0.0:
T = round_figures(T/1.5, 2)
state, E, acceptance, improvement = run(state, T, steps)
step += steps
update(T, E, acceptance, improvement)
Tmin = T
# Calculate anneal duration
elapsed = time.time() - start
duration = round_figures(int(60.0 * minutes * step / elapsed), 2)
# Perform anneal
wln('Annealing from %.2f to %.2f over %i steps:' % (Tmax, Tmin, duration))
return self.anneal(state, Tmax, Tmin, duration, 20)
if __name__ == '__main__':
"""Test annealer with a traveling salesman problem."""
# List latitude and longitude (degrees) for the twenty largest U.S. cities
cities = { 'New York City': (40.72,74.00), 'Los Angeles': (34.05,118.25),
'Chicago': (41.88,87.63), 'Houston': (29.77,95.38),
'Phoenix': (33.45,112.07), 'Philadelphia': (39.95,75.17),
'San Antonio': (29.53,98.47), 'Dallas': (32.78,96.80),
'San Diego': (32.78,117.15), 'San Jose': (37.30,121.87),
'Detroit': (42.33,83.05), 'San Francisco': (37.78,122.42),
'Jacksonville': (30.32,81.70), 'Indianapolis': (39.78,86.15),
'Austin': (30.27,97.77), 'Columbus': (39.98,82.98),
'Fort Worth': (32.75,97.33), 'Charlotte': (35.23,80.85),
'Memphis': (35.12,89.97), 'Baltimore': (39.28,76.62) }
def distance(a, b):
"""Calculates distance between two latitude-longitude coordinates."""
R = 3963 # radius of Earth (miles)
lat1, lon1 = math.radians(a[0]), math.radians(a[1])
lat2, lon2 = math.radians(b[0]), math.radians(b[1])
return math.acos( math.sin(lat1)*math.sin(lat2) +
math.cos(lat1)*math.cos(lat2)*math.cos(lon1-lon2) ) * R
def route_move(state):
"""Swaps two cities in the route."""
a = random.randint( 0, len(state)-1 )
b = random.randint( 0, len(state)-1 )
state[a], state[b] = state[b], state[a]
def route_energy(state):
"""Calculates the length of the route."""
e = 0
for i in range(len(state)):
e += distance( cities[state[i-1]], cities[state[i]] )
return e
# Start with the cities listed in random order
state = cities.keys()
random.shuffle(state)
# Minimize the distance to be traveled by simulated annealing with a
# manually chosen temperature schedule
annealer = Annealer(route_energy, route_move)
state, e = annealer.anneal(state, 10000000, 0.01, 18000*len(state), 9)
while state[0] != 'New York City':
state = state[1:] + state[:1] # rotate NYC to start
print "%i mile route:" % route_energy(state)
for city in state:
print "\t", city
# Minimize the distance to be traveled by simulated annealing with an
# automatically chosen temperature schedule
state, e = annealer.auto(state, 4)
while state[0] != 'New York City':
state = state[1:] + state[:1] # rotate NYC to start
print "%i mile route:" % route_energy(state)
for city in state:
print "\t", city
sys.exit()
| 40.477848 | 93 | 0.596435 |
4a273c99f824d1aff1168c04620a7dba0126d34c | 5,363 | py | Python | kolibri/core/content/test/test_movedirectory.py | arceduardvincent/kolibri | 26073dda2569bb38bfe1e08ba486e96f650d10ce | [
"MIT"
] | null | null | null | kolibri/core/content/test/test_movedirectory.py | arceduardvincent/kolibri | 26073dda2569bb38bfe1e08ba486e96f650d10ce | [
"MIT"
] | null | null | null | kolibri/core/content/test/test_movedirectory.py | arceduardvincent/kolibri | 26073dda2569bb38bfe1e08ba486e96f650d10ce | [
"MIT"
] | null | null | null | from django.core.management import call_command
from django.test import TestCase
from mock import patch
from kolibri.utils.conf import OPTIONS
class ContentMoveDirectoryTestCase(TestCase):
"""
Testcase for the command kolibri manage content movedirectory <destination>
"""
# Helper methods
def _path_exists_side_effect(*args):
if args[0] == OPTIONS['Paths']['CONTENT_DIR']:
return True
elif args[0].startswith('/test/success'):
return False
return True
def _listdir_side_effect(*args):
if args[0] == OPTIONS['Paths']['CONTENT_DIR']:
return ['databases', 'storage']
elif args[0] == OPTIONS['Paths']['CONTENT_DIR'] + '/databases':
return ['test.sqlite3']
elif args[0] == OPTIONS['Paths']['CONTENT_DIR'] + '/storage':
return ['test.mp3']
elif args[0] == '/test/content_exists_yes/databases':
return ['exists.sqlite3']
return []
@patch('kolibri.core.content.management.commands.content.Command.migrate')
@patch('kolibri.core.content.management.commands.content.os.path.exists', return_value=False)
def test_current_content_dir_dne(self, path_exists_mock, migrate_mock):
with self.assertRaises(SystemExit):
call_command('content', 'movedirectory', 'test')
migrate_mock.assert_not_called()
@patch('kolibri.core.content.management.commands.content.Command.migrate')
@patch('kolibri.utils.server.get_status', return_value=True)
@patch('kolibri.core.content.management.commands.content.os.path.exists', return_value=True)
def test_migrate_while_kolibri_running(self, path_exists_mock, server_mock, migrate_mock):
with self.assertRaises(SystemExit):
call_command('content', 'movedirectory', 'test')
migrate_mock.assert_not_called()
@patch('kolibri.core.content.management.commands.content.shutil.rmtree')
@patch('kolibri.core.content.management.commands.content.shutil.copy2')
@patch('kolibri.core.content.management.commands.content.input', return_value='no')
@patch('kolibri.core.content.management.commands.content.os.listdir', side_effect=_listdir_side_effect)
@patch('kolibri.core.content.management.commands.content.os.path.exists', return_value=True)
def test_migrate_while_dest_content_exists_no(self, path_exists_mock, listdir_mock, input_mock, copyfile_mock, remove_mock):
destination = '/test/content_exists_no'
call_command('content', 'movedirectory', destination)
self.assertEqual(copyfile_mock.call_count, 2)
self.assertEqual(remove_mock.call_count, 2)
@patch('kolibri.core.content.management.commands.content.Command.copy_content')
@patch('kolibri.core.content.management.commands.content.shutil.rmtree')
@patch('kolibri.core.content.management.commands.content.input', return_value='yes')
@patch('kolibri.core.content.management.commands.content.os.listdir', return_value=['test'])
@patch('kolibri.core.content.management.commands.content.os.path.exists', return_value=True)
def test_migrate_while_dest_content_exists_yes(self, path_exists_mock, listdir_mock, input_mock, remove_mock, copy_mock):
destination = '/test/content_exists_yes'
call_command('content', 'movedirectory', destination)
copy_mock.assert_called()
self.assertEqual(remove_mock.call_count, 4)
@patch('kolibri.core.content.management.commands.content.Command.update_config_content_directory')
@patch('kolibri.core.content.management.commands.content.input', return_value='random')
@patch('kolibri.core.content.management.commands.content.os.listdir', return_value=['test'])
@patch('kolibri.core.content.management.commands.content.os.path.exists', return_value=True)
def test_migrate_while_dest_content_exists_random(self, path_exists_mock, listdir_mock, input_mock, update_mock):
destination = '/test/content_exists_random'
with self.assertRaises(SystemExit):
call_command('content', 'movedirectory', destination)
update_mock.assert_not_called()
@patch('kolibri.core.content.management.commands.content.shutil.rmtree')
@patch('kolibri.core.content.management.commands.content.shutil.copystat')
@patch('kolibri.core.content.management.commands.content.os.makedirs')
@patch('kolibri.core.content.management.commands.content.os.listdir', return_value=[])
@patch('kolibri.core.content.management.commands.content.os.path.exists', side_effect=_path_exists_side_effect)
def test_migrate_while_dest_dir_dne_success(self, path_exists_mock, listdir_mock, mkdir_mock, copystat_mock, remove_mock):
destination = '/test/success'
call_command('content', 'movedirectory', destination)
remove_mock.assert_called()
mkdir_mock.assert_called()
copystat_mock.assert_called()
@patch('kolibri.core.content.management.commands.content.Command.migrate')
@patch('kolibri.core.content.management.commands.content.os.path.exists', return_value=True)
def test_current_dir_equals_destination(self, path_exists_mock, migrate_mock):
with self.assertRaises(SystemExit):
call_command('content', 'movedirectory', OPTIONS['Paths']['CONTENT_DIR'])
migrate_mock.assert_not_called()
| 56.452632 | 128 | 0.732612 |
4a273e1ee255533a50d76c84314e30b3ed1c3f6b | 26,325 | py | Python | test/sql/test_text.py | csboling/sqlalchemy-fk-hack | d199de2620ac2bd2d6be5e5f081a8b2c3a4211dc | [
"MIT"
] | null | null | null | test/sql/test_text.py | csboling/sqlalchemy-fk-hack | d199de2620ac2bd2d6be5e5f081a8b2c3a4211dc | [
"MIT"
] | null | null | null | test/sql/test_text.py | csboling/sqlalchemy-fk-hack | d199de2620ac2bd2d6be5e5f081a8b2c3a4211dc | [
"MIT"
] | null | null | null | """Test the TextClause and related constructs."""
from sqlalchemy.testing import fixtures, AssertsCompiledSQL, eq_, \
assert_raises_message, expect_warnings, assert_warnings
from sqlalchemy import text, select, Integer, String, Float, \
bindparam, and_, func, literal_column, exc, MetaData, Table, Column,\
asc, desc, union
from sqlalchemy.types import NullType
from sqlalchemy.sql import table, column, util as sql_util
from sqlalchemy import util
table1 = table('mytable',
column('myid', Integer),
column('name', String),
column('description', String),
)
table2 = table(
'myothertable',
column('otherid', Integer),
column('othername', String),
)
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_basic(self):
self.assert_compile(
text("select * from foo where lala = bar"),
"select * from foo where lala = bar"
)
class SelectCompositionTest(fixtures.TestBase, AssertsCompiledSQL):
"""test the usage of text() implicit within the select() construct
when strings are passed."""
__dialect__ = 'default'
def test_select_composition_one(self):
self.assert_compile(select(
[
literal_column("foobar(a)"),
literal_column("pk_foo_bar(syslaal)")
],
text("a = 12"),
from_obj=[
text("foobar left outer join lala on foobar.foo = lala.foo")
]
),
"SELECT foobar(a), pk_foo_bar(syslaal) FROM foobar "
"left outer join lala on foobar.foo = lala.foo WHERE a = 12"
)
def test_select_composition_two(self):
s = select()
s.append_column(column("column1"))
s.append_column(column("column2"))
s.append_whereclause(text("column1=12"))
s.append_whereclause(text("column2=19"))
s = s.order_by("column1")
s.append_from(text("table1"))
self.assert_compile(s, "SELECT column1, column2 FROM table1 WHERE "
"column1=12 AND column2=19 ORDER BY column1")
def test_select_composition_three(self):
self.assert_compile(
select([column("column1"), column("column2")],
from_obj=table1).alias('somealias').select(),
"SELECT somealias.column1, somealias.column2 FROM "
"(SELECT column1, column2 FROM mytable) AS somealias"
)
def test_select_composition_four(self):
# test that use_labels doesn't interfere with literal columns
self.assert_compile(
select([
text("column1"), column("column2"),
column("column3").label("bar"), table1.c.myid],
from_obj=table1,
use_labels=True),
"SELECT column1, column2, column3 AS bar, "
"mytable.myid AS mytable_myid "
"FROM mytable"
)
def test_select_composition_five(self):
# test that use_labels doesn't interfere
# with literal columns that have textual labels
self.assert_compile(
select([
text("column1 AS foobar"), text("column2 AS hoho"),
table1.c.myid],
from_obj=table1, use_labels=True),
"SELECT column1 AS foobar, column2 AS hoho, "
"mytable.myid AS mytable_myid FROM mytable"
)
def test_select_composition_six(self):
# test that "auto-labeling of subquery columns"
# doesn't interfere with literal columns,
# exported columns don't get quoted
self.assert_compile(
select([
literal_column("column1 AS foobar"),
literal_column("column2 AS hoho"), table1.c.myid],
from_obj=[table1]).select(),
"SELECT column1 AS foobar, column2 AS hoho, myid FROM "
"(SELECT column1 AS foobar, column2 AS hoho, "
"mytable.myid AS myid FROM mytable)"
)
def test_select_composition_seven(self):
self.assert_compile(
select([
literal_column('col1'),
literal_column('col2')
], from_obj=table('tablename')).alias('myalias'),
"SELECT col1, col2 FROM tablename"
)
def test_select_composition_eight(self):
self.assert_compile(select(
[table1.alias('t'), text("foo.f")],
text("foo.f = t.id"),
from_obj=[text("(select f from bar where lala=heyhey) foo")]
),
"SELECT t.myid, t.name, t.description, foo.f FROM mytable AS t, "
"(select f from bar where lala=heyhey) foo WHERE foo.f = t.id")
def test_select_bundle_columns(self):
self.assert_compile(select(
[table1, table2.c.otherid,
text("sysdate()"), text("foo, bar, lala")],
and_(
text("foo.id = foofoo(lala)"),
text("datetime(foo) = Today"),
table1.c.myid == table2.c.otherid,
)
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, sysdate(), foo, bar, lala "
"FROM mytable, myothertable WHERE foo.id = foofoo(lala) AND "
"datetime(foo) = Today AND mytable.myid = myothertable.otherid")
class BindParamTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_legacy(self):
t = text("select * from foo where lala=:bar and hoho=:whee",
bindparams=[bindparam('bar', 4), bindparam('whee', 7)])
self.assert_compile(
t,
"select * from foo where lala=:bar and hoho=:whee",
checkparams={'bar': 4, 'whee': 7},
)
def test_positional(self):
t = text("select * from foo where lala=:bar and hoho=:whee")
t = t.bindparams(bindparam('bar', 4), bindparam('whee', 7))
self.assert_compile(
t,
"select * from foo where lala=:bar and hoho=:whee",
checkparams={'bar': 4, 'whee': 7},
)
def test_kw(self):
t = text("select * from foo where lala=:bar and hoho=:whee")
t = t.bindparams(bar=4, whee=7)
self.assert_compile(
t,
"select * from foo where lala=:bar and hoho=:whee",
checkparams={'bar': 4, 'whee': 7},
)
def test_positional_plus_kw(self):
t = text("select * from foo where lala=:bar and hoho=:whee")
t = t.bindparams(bindparam('bar', 4), whee=7)
self.assert_compile(
t,
"select * from foo where lala=:bar and hoho=:whee",
checkparams={'bar': 4, 'whee': 7},
)
def test_literal_binds(self):
t = text("select * from foo where lala=:bar and hoho=:whee")
t = t.bindparams(bindparam('bar', 4), whee='whee')
self.assert_compile(
t,
"select * from foo where lala=4 and hoho='whee'",
checkparams={},
literal_binds=True
)
def _assert_type_map(self, t, compare):
map_ = dict(
(b.key, b.type) for b in t._bindparams.values()
)
for k in compare:
assert compare[k]._type_affinity is map_[k]._type_affinity
def test_typing_construction(self):
t = text("select * from table :foo :bar :bat")
self._assert_type_map(t, {"foo": NullType(),
"bar": NullType(),
"bat": NullType()})
t = t.bindparams(bindparam('foo', type_=String))
self._assert_type_map(t, {"foo": String(),
"bar": NullType(),
"bat": NullType()})
t = t.bindparams(bindparam('bar', type_=Integer))
self._assert_type_map(t, {"foo": String(),
"bar": Integer(),
"bat": NullType()})
t = t.bindparams(bat=45.564)
self._assert_type_map(t, {"foo": String(),
"bar": Integer(),
"bat": Float()})
def test_binds_compiled_named(self):
self.assert_compile(
text("select * from foo where lala=:bar and hoho=:whee").
bindparams(bar=4, whee=7),
"select * from foo where lala=%(bar)s and hoho=%(whee)s",
checkparams={'bar': 4, 'whee': 7},
dialect="postgresql"
)
def test_binds_compiled_positional(self):
self.assert_compile(
text("select * from foo where lala=:bar and hoho=:whee").
bindparams(bar=4, whee=7),
"select * from foo where lala=? and hoho=?",
checkparams={'bar': 4, 'whee': 7},
dialect="sqlite"
)
def test_missing_bind_kw(self):
assert_raises_message(
exc.ArgumentError,
r"This text\(\) construct doesn't define "
r"a bound parameter named 'bar'",
text(":foo").bindparams,
foo=5,
bar=7)
def test_missing_bind_posn(self):
assert_raises_message(
exc.ArgumentError,
r"This text\(\) construct doesn't define "
r"a bound parameter named 'bar'",
text(":foo").bindparams,
bindparam(
'foo',
value=5),
bindparam(
'bar',
value=7))
def test_escaping_colons(self):
# test escaping out text() params with a backslash
self.assert_compile(
text(r"select * from foo where clock='05:06:07' "
r"and mork='\:mindy'"),
"select * from foo where clock='05:06:07' and mork=':mindy'",
checkparams={},
params={},
dialect="postgresql"
)
def test_escaping_double_colons(self):
self.assert_compile(
text(
r"SELECT * FROM pg_attribute WHERE "
r"attrelid = :tab\:\:regclass"),
"SELECT * FROM pg_attribute WHERE "
"attrelid = %(tab)s::regclass",
params={'tab': None},
dialect="postgresql"
)
def test_text_in_select_nonfrom(self):
generate_series = text("generate_series(:x, :y, :z) as s(a)").\
bindparams(x=None, y=None, z=None)
s = select([
(func.current_date() + literal_column("s.a")).label("dates")
]).select_from(generate_series)
self.assert_compile(
s,
"SELECT CURRENT_DATE + s.a AS dates FROM "
"generate_series(:x, :y, :z) as s(a)",
checkparams={'y': None, 'x': None, 'z': None}
)
self.assert_compile(
s.params(x=5, y=6, z=7),
"SELECT CURRENT_DATE + s.a AS dates FROM "
"generate_series(:x, :y, :z) as s(a)",
checkparams={'y': 6, 'x': 5, 'z': 7}
)
class AsFromTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_basic_toplevel_resultmap_positional(self):
t = text("select id, name from user").columns(
column('id', Integer),
column('name')
)
compiled = t.compile()
eq_(compiled._create_result_map(),
{'id': ('id',
(t.c.id._proxies[0],
'id',
'id'),
t.c.id.type),
'name': ('name',
(t.c.name._proxies[0],
'name',
'name'),
t.c.name.type)})
def test_basic_toplevel_resultmap(self):
t = text("select id, name from user").columns(id=Integer, name=String)
compiled = t.compile()
eq_(compiled._create_result_map(),
{'id': ('id',
(t.c.id._proxies[0],
'id',
'id'),
t.c.id.type),
'name': ('name',
(t.c.name._proxies[0],
'name',
'name'),
t.c.name.type)})
def test_basic_subquery_resultmap(self):
t = text("select id, name from user").columns(id=Integer, name=String)
stmt = select([table1.c.myid]).select_from(
table1.join(t, table1.c.myid == t.c.id))
compiled = stmt.compile()
eq_(
compiled._create_result_map(),
{
"myid": ("myid",
(table1.c.myid, "myid", "myid"), table1.c.myid.type),
}
)
def test_column_collection_ordered(self):
t = text("select a, b, c from foo").columns(column('a'),
column('b'), column('c'))
eq_(t.c.keys(), ['a', 'b', 'c'])
def test_column_collection_pos_plus_bykey(self):
# overlapping positional names + type names
t = text("select a, b, c from foo").columns(
column('a'),
column('b'),
b=Integer,
c=String)
eq_(t.c.keys(), ['a', 'b', 'c'])
eq_(t.c.b.type._type_affinity, Integer)
eq_(t.c.c.type._type_affinity, String)
def _xy_table_fixture(self):
m = MetaData()
t = Table('t', m, Column('x', Integer), Column('y', Integer))
return t
def _mapping(self, stmt):
compiled = stmt.compile()
return dict(
(elem, key)
for key, elements in compiled._create_result_map().items()
for elem in elements[1]
)
def test_select_label_alt_name(self):
t = self._xy_table_fixture()
l1, l2 = t.c.x.label('a'), t.c.y.label('b')
s = text("select x AS a, y AS b FROM t").columns(l1, l2)
mapping = self._mapping(s)
assert l1 in mapping
assert t.c.x not in mapping
def test_select_alias_label_alt_name(self):
t = self._xy_table_fixture()
l1, l2 = t.c.x.label('a'), t.c.y.label('b')
s = text("select x AS a, y AS b FROM t").columns(l1, l2).alias()
mapping = self._mapping(s)
assert l1 in mapping
assert t.c.x not in mapping
def test_select_column(self):
t = self._xy_table_fixture()
x, y = t.c.x, t.c.y
s = text("select x, y FROM t").columns(x, y)
mapping = self._mapping(s)
assert t.c.x in mapping
def test_select_alias_column(self):
t = self._xy_table_fixture()
x, y = t.c.x, t.c.y
s = text("select x, y FROM t").columns(x, y).alias()
mapping = self._mapping(s)
assert t.c.x in mapping
def test_select_table_alias_column(self):
t = self._xy_table_fixture()
x, y = t.c.x, t.c.y
ta = t.alias()
s = text("select ta.x, ta.y FROM t AS ta").columns(ta.c.x, ta.c.y)
mapping = self._mapping(s)
assert x not in mapping
def test_select_label_alt_name_table_alias_column(self):
t = self._xy_table_fixture()
x, y = t.c.x, t.c.y
ta = t.alias()
l1, l2 = ta.c.x.label('a'), ta.c.y.label('b')
s = text("SELECT ta.x AS a, ta.y AS b FROM t AS ta").columns(l1, l2)
mapping = self._mapping(s)
assert x not in mapping
assert l1 in mapping
assert ta.c.x not in mapping
def test_cte(self):
t = text("select id, name from user").columns(
id=Integer,
name=String).cte('t')
s = select([table1]).where(table1.c.myid == t.c.id)
self.assert_compile(
s,
"WITH t AS (select id, name from user) "
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, t WHERE mytable.myid = t.id"
)
def test_alias(self):
t = text("select id, name from user").columns(
id=Integer,
name=String).alias('t')
s = select([table1]).where(table1.c.myid == t.c.id)
self.assert_compile(
s,
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, (select id, name from user) AS t "
"WHERE mytable.myid = t.id"
)
def test_scalar_subquery(self):
t = text("select id from user").columns(id=Integer)
subq = t.as_scalar()
assert subq.type._type_affinity is Integer()._type_affinity
s = select([table1.c.myid, subq]).where(table1.c.myid == subq)
self.assert_compile(
s,
"SELECT mytable.myid, (select id from user) AS anon_1 "
"FROM mytable WHERE mytable.myid = (select id from user)"
)
def test_build_bindparams(self):
t = text("select id from user :foo :bar :bat")
t = t.bindparams(bindparam("foo", type_=Integer))
t = t.columns(id=Integer)
t = t.bindparams(bar=String)
t = t.bindparams(bindparam('bat', value='bat'))
eq_(
set(t.element._bindparams),
set(["bat", "foo", "bar"])
)
class TextWarningsTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def _test(self, fn, arg, offending_clause, expected):
with expect_warnings("Textual "):
stmt = fn(arg)
self.assert_compile(stmt, expected)
assert_raises_message(
exc.SAWarning,
r"Textual (?:SQL|column|SQL FROM) expression %(stmt)r should be "
r"explicitly declared (?:with|as) text\(%(stmt)r\)" % {
"stmt": util.ellipses_string(offending_clause),
},
fn, arg
)
def test_where(self):
self._test(
select([table1.c.myid]).where, "myid == 5", "myid == 5",
"SELECT mytable.myid FROM mytable WHERE myid == 5"
)
def test_column(self):
self._test(
select, ["myid"], "myid",
"SELECT myid"
)
def test_having(self):
self._test(
select([table1.c.myid]).having, "myid == 5", "myid == 5",
"SELECT mytable.myid FROM mytable HAVING myid == 5"
)
def test_from(self):
self._test(
select([table1.c.myid]).select_from, "mytable", "mytable",
"SELECT mytable.myid FROM mytable, mytable" # two FROMs
)
class OrderByLabelResolutionTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def _test_warning(self, stmt, offending_clause, expected):
with expect_warnings(
"Can't resolve label reference %r;" % offending_clause):
self.assert_compile(
stmt,
expected
)
assert_raises_message(
exc.SAWarning,
"Can't resolve label reference %r; converting to text" %
offending_clause,
stmt.compile
)
def test_order_by_label(self):
stmt = select([table1.c.myid.label('foo')]).order_by('foo')
self.assert_compile(
stmt,
"SELECT mytable.myid AS foo FROM mytable ORDER BY foo"
)
def test_order_by_colname(self):
stmt = select([table1.c.myid]).order_by('name')
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable ORDER BY mytable.name"
)
def test_order_by_alias_colname(self):
t1 = table1.alias()
stmt = select([t1.c.myid]).apply_labels().order_by('name')
self.assert_compile(
stmt,
"SELECT mytable_1.myid AS mytable_1_myid "
"FROM mytable AS mytable_1 ORDER BY mytable_1.name"
)
def test_order_by_named_label_from_anon_label(self):
s1 = select([table1.c.myid.label(None).label("foo"), table1.c.name])
stmt = s1.order_by("foo")
self.assert_compile(
stmt,
"SELECT mytable.myid AS foo, mytable.name "
"FROM mytable ORDER BY foo"
)
def test_order_by_outermost_label(self):
# test [ticket:3335], assure that order_by("foo")
# catches the label named "foo" in the columns clause only,
# and not the label named "foo" in the FROM clause
s1 = select([table1.c.myid.label("foo"), table1.c.name]).alias()
stmt = select([s1.c.name, func.bar().label("foo")]).order_by("foo")
self.assert_compile(
stmt,
"SELECT anon_1.name, bar() AS foo FROM "
"(SELECT mytable.myid AS foo, mytable.name AS name "
"FROM mytable) AS anon_1 ORDER BY foo"
)
def test_unresolvable_warning_order_by(self):
stmt = select([table1.c.myid]).order_by('foobar')
self._test_warning(
stmt, "foobar",
"SELECT mytable.myid FROM mytable ORDER BY foobar"
)
def test_group_by_label(self):
stmt = select([table1.c.myid.label('foo')]).group_by('foo')
self.assert_compile(
stmt,
"SELECT mytable.myid AS foo FROM mytable GROUP BY foo"
)
def test_group_by_colname(self):
stmt = select([table1.c.myid]).group_by('name')
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable GROUP BY mytable.name"
)
def test_unresolvable_warning_group_by(self):
stmt = select([table1.c.myid]).group_by('foobar')
self._test_warning(
stmt, "foobar",
"SELECT mytable.myid FROM mytable GROUP BY foobar"
)
def test_asc(self):
stmt = select([table1.c.myid]).order_by(asc('name'), 'description')
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable "
"ORDER BY mytable.name ASC, mytable.description"
)
def test_group_by_subquery(self):
stmt = select([table1]).alias()
stmt = select([stmt]).apply_labels().group_by("myid")
self.assert_compile(
stmt,
"SELECT anon_1.myid AS anon_1_myid, anon_1.name AS anon_1_name, "
"anon_1.description AS anon_1_description FROM "
"(SELECT mytable.myid AS myid, mytable.name AS name, "
"mytable.description AS description FROM mytable) AS anon_1 "
"GROUP BY anon_1.myid"
)
def test_order_by_func_label_desc(self):
stmt = select([func.foo('bar').label('fb'), table1]).\
order_by(desc('fb'))
self.assert_compile(
stmt,
"SELECT foo(:foo_1) AS fb, mytable.myid, mytable.name, "
"mytable.description FROM mytable ORDER BY fb DESC"
)
def test_pg_distinct(self):
stmt = select([table1]).distinct('name')
self.assert_compile(
stmt,
"SELECT DISTINCT ON (mytable.name) mytable.myid, "
"mytable.name, mytable.description FROM mytable",
dialect="postgresql"
)
def test_over(self):
stmt = select([column("foo"), column("bar")])
stmt = select(
[func.row_number().
over(order_by='foo', partition_by='bar')]
).select_from(stmt)
self.assert_compile(
stmt,
"SELECT row_number() OVER (PARTITION BY bar ORDER BY foo) "
"AS anon_1 FROM (SELECT foo, bar)"
)
def test_union_column(self):
s1 = select([table1])
s2 = select([table1])
stmt = union(s1, s2).order_by("name")
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable UNION SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable ORDER BY name"
)
def test_union_label(self):
s1 = select([func.foo("hoho").label('x')])
s2 = select([func.foo("Bar").label('y')])
stmt = union(s1, s2).order_by("x")
self.assert_compile(
stmt,
"SELECT foo(:foo_1) AS x UNION SELECT foo(:foo_2) AS y ORDER BY x"
)
def test_standalone_units_stringable(self):
self.assert_compile(
desc("somelabel"),
"somelabel DESC"
)
def test_columnadapter_anonymized(self):
"""test issue #3148
Testing the anonymization applied from the ColumnAdapter.columns
collection, typically as used in eager loading.
"""
exprs = [
table1.c.myid,
table1.c.name.label('t1name'),
func.foo("hoho").label('x')]
ta = table1.alias()
adapter = sql_util.ColumnAdapter(ta, anonymize_labels=True)
s1 = select([adapter.columns[expr] for expr in exprs]).\
apply_labels().order_by("myid", "t1name", "x")
def go():
# the labels here are anonymized, so label naming
# can't catch these.
self.assert_compile(
s1,
"SELECT mytable_1.myid AS mytable_1_myid, "
"mytable_1.name AS name_1, foo(:foo_2) AS foo_1 "
"FROM mytable AS mytable_1 ORDER BY mytable_1.myid, t1name, x"
)
assert_warnings(
go,
["Can't resolve label reference 't1name'",
"Can't resolve label reference 'x'"], regex=True)
def test_columnadapter_non_anonymized(self):
"""test issue #3148
Testing the anonymization applied from the ColumnAdapter.columns
collection, typically as used in eager loading.
"""
exprs = [
table1.c.myid,
table1.c.name.label('t1name'),
func.foo("hoho").label('x')]
ta = table1.alias()
adapter = sql_util.ColumnAdapter(ta)
s1 = select([adapter.columns[expr] for expr in exprs]).\
apply_labels().order_by("myid", "t1name", "x")
# labels are maintained
self.assert_compile(
s1,
"SELECT mytable_1.myid AS mytable_1_myid, "
"mytable_1.name AS t1name, foo(:foo_1) AS x "
"FROM mytable AS mytable_1 ORDER BY mytable_1.myid, t1name, x"
)
| 33.880309 | 78 | 0.543324 |
4a273e3696030301d820b625177ce0089119a074 | 432 | py | Python | oscar/apps/customer/models.py | endgame/django-oscar | e5d78436e20b55902537a6cc82edf4e22568f9d6 | [
"BSD-3-Clause"
] | 1 | 2015-11-07T12:37:50.000Z | 2015-11-07T12:37:50.000Z | oscar/apps/customer/models.py | michaelBenin/django-oscar | f45d38b5d3ffa10756d95c625fb90a27185ce1e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/apps/customer/models.py | michaelBenin/django-oscar | f45d38b5d3ffa10756d95c625fb90a27185ce1e1 | [
"BSD-3-Clause"
] | 1 | 2019-07-10T06:32:14.000Z | 2019-07-10T06:32:14.000Z | from oscar.apps.customer import abstract_models
class Email(abstract_models.AbstractEmail):
pass
class CommunicationEventType(abstract_models.AbstractCommunicationEventType):
pass
class Notification(abstract_models.AbstractNotification):
pass
class ProductAlert(abstract_models.AbstractProductAlert):
pass
from oscar.apps.customer.history_helpers import *
from oscar.apps.customer.alerts.receivers import * | 20.571429 | 77 | 0.824074 |
4a273e6042015a47d96df17819a8319af78d82de | 350 | py | Python | job_client_redis/store.py | jupyter-observablehq-bridge/job-client-py-redis | 6b2156a35a45091ebe4fe3c806ca34b1eb47a423 | [
"MIT"
] | null | null | null | job_client_redis/store.py | jupyter-observablehq-bridge/job-client-py-redis | 6b2156a35a45091ebe4fe3c806ca34b1eb47a423 | [
"MIT"
] | null | null | null | job_client_redis/store.py | jupyter-observablehq-bridge/job-client-py-redis | 6b2156a35a45091ebe4fe3c806ca34b1eb47a423 | [
"MIT"
] | null | null | null |
class Store(dict):
"""
"""
def __init__(self, id, action=None):
"""
"""
self.id = id
self.action = action
def __setitem__(self, name, value):
"""
"""
super().__setitem__(name, value)
if name != 'action' and self.action:
self.action(self.id, name, value)
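# A minimal usage sketch, kept as comments; the callback and key names below are
# hypothetical and only illustrate how Store is meant to be driven:
#
#     def on_change(store_id, name, value):
#         print(store_id, name, value)   # e.g. forward the update to a broker
#
#     s = Store('bridge-1', action=on_change)
#     s['price'] = 42       # triggers on_change('bridge-1', 'price', 42)
#     s['action'] = None    # keys named 'action' never trigger the callback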
| 18.421053 | 45 | 0.477143 |
4a273f0e134cf4c222d6779632c9ce77f8090e9b | 10,061 | py | Python | report_builder.py | benjaminramsden/500k | 18379d98e485a7244e2f8ef79a56cec47cbdb630 | [
"MIT"
] | null | null | null | report_builder.py | benjaminramsden/500k | 18379d98e485a7244e2f8ef79a56cec47cbdb630 | [
"MIT"
] | 12 | 2017-05-19T03:42:41.000Z | 2017-09-07T20:19:41.000Z | report_builder.py | benjaminramsden/500k | 18379d98e485a7244e2f8ef79a56cec47cbdb630 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
from sheets_api import *
from report import Report
from village import Village
from missionary import Missionary, Child, Spouse
from imgur import update_imgur_ids
from powerpoint import *
from Queue import Queue
import threading
import argparse
# This script conducts the following:
# - Gets the information on a missionary based on Miss ID (gets all)
# - Populates the title slide with the missionaries details
# - Creates a content slide per report
# - Exports the slideshow to pdf
# - Uploads to Ben's Google Drive account via API
# - Pastes the URL of the report in Google Drive to the web report sheets
#
# With this info Ben can then send out multiple reports using:
# https://support.yet-another-mail-merge.com/hc/en-us/articles/210735349
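# For reference, example invocations (the date value is illustrative only; both
# flags are defined in main() below):
#
#     python report_builder.py --date 01/2017
#     python report_builder.py --test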
def main():
logging.basicConfig(filename='diags.log',
level=logging.INFO,
format='%(asctime)s %(name)-12s %(levelname)-8s %(threadName)s %(message)s',
datefmt='%m-%d %H:%M')
parser = argparse.ArgumentParser(description='Report builder for 500k')
parser.add_argument('-d',
'--date',
help='Month and year to generate reports for in MM/YYYY integer form e.g. 01/2017',
required=False)
parser.add_argument('--test', help='Use sample data only', action='store_true')
args = parser.parse_args()
# Validate date input
if args.date:
try:
if len(args.date) != 7:
raise TypeError
int(args.date[:2])
int(args.date[3:])
except (TypeError, ValueError):
raise ValueError("Date must be in MM/YYYY format")
# Make sure all Imgur IDs are up-to-date.
imgur_imgs = update_imgur_ids()
# Gather all information from the spreadsheet. Returned as list of lists
# where each list is a row of cells.
if args.test:
report_data = get_all_missionary_reports(test=True)
factfile_data = get_all_factfile_data(test=True)
else:
report_data = get_all_missionary_reports()
factfile_data = get_all_factfile_data()
# Now build out the data into usable dictionaries
all_missionaries = construct_data(report_data, factfile_data, imgur_imgs)
# Time to create the presentations, loop around for every single missionary
# TODO - In future make sure only missionaries with new reports get
# generated
logging.info("Creating powerpoints for {0} missionaries".format(
len(all_missionaries)))
q = Queue(maxsize=0)
num_threads = 10
for i in range(num_threads):
worker = threading.Thread(target=create_powerpoint_pdf, args=(q,))
worker.setDaemon(True)
worker.start()
if args.date:
date = args.date
else:
date = None
for miss_id, missionary in all_missionaries.iteritems():
q.put((missionary, miss_id, date))
q.join()
return 0
def construct_data(report_data, factfile_data, imgur_imgs):
"""
Combine the two spreadsheets to create a total view of all the missionary
data. Once complete, we have all the info required to start creating the
reports.
"""
all_missionaries = {}
construct_factfile_data(all_missionaries, factfile_data)
construct_report_data(all_missionaries, report_data)
# add_imgur_profiles(all_missionaries, imgur_imgs)
return all_missionaries
def construct_report_data(all_missionaries, report_data):
# For all the missionaries, arrange data in this structure:
# All
# -> Missionary 1 (based on ID)
# -> Report 1
# -> Date, Subject, Raw, Missionary, Missionary ID etc...
# -> Village 1
# -> Village
# -> People
# -> Baptisms
# -> Village 2
# ...
# -> All Prayer Points
# ...
# -> Report 2
# ...
# -> Missionary 2
# ...
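# As a concrete (hypothetical) illustration of that shape, with made-up IDs:
# all_missionaries["IND0042"] -> Missionary
# .reports[round] -> Report for that reporting round
# .villages -> [Village(name, people, baptisms), ...]
# .prayer_rqs -> ["...", ...]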
logging.info("Constructing report data")
# As we may change the order of the columns from time to time and need to
# make this sustainable for any changes, create a dictionary of column
# numbers against the column header text. This should be a single linear
# search to get all the headings.
columns = dict()
for idx, column in enumerate(report_data[0]):
columns[column] = idx
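# For example, the mapping built above might (hypothetically) look like
# {'Date (Pretty)': 0, u'\u2022Missionary Name: ': 1, u'\u2022MissionaryID: ': 2, ...}
# so each cell below is looked up by header name rather than by fixed position.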
for row in report_data[1:]:
if len(row) > columns[u'\u2022Main Story / Report: ']:
try:
report = Report(row[columns['Date (Pretty)']],
row[columns[u'\u2022Missionary Name: ']],
row[columns[u'\u2022MissionaryID: ']],
row[columns[u'\u2022Main Story / Report: ']])
except NotImplementedError:
continue
villages = []
prayer_rqs = []
for i in range(1, 6):
if row[columns[u'\u2022V' + str(i) + ': ']]:
villages.append(
Village(row[columns[u'\u2022V' + str(i) + ': ']],
row[columns[u'\u2022V' + str(i) + 'N: ']],
row[columns[u'\u2022V' + str(i) + 'B: ']]))
for i in range(1, 9):
if (len(row) > columns["P-R-" + str(i) + ": "] and
row[columns["P-R-" + str(i) + ": "]]):
prayer_rqs.append(row[columns["P-R-" + str(i) + ": "]])
report.villages = villages
report.prayer_rqs = prayer_rqs
report.round = report.get_report_round()
missionary_id = report.id
if missionary_id in all_missionaries.keys():
# Missionary already exists, add report to dictionary
missionary = all_missionaries[missionary_id]
else:
# New missionary, create new missionary and add report.
logging.warning("No factfile data for {0}".format(
missionary_id))
names = report.name.split(" ")
if len(names) > 1:
try:
missionary = Missionary(missionary_id,
names[-1],
names[-2])
except NotImplementedError:
continue
else:
try:
missionary = Missionary(missionary_id,
names[-1],
None)
except NotImplementedError:
continue
all_missionaries[missionary_id] = missionary
missionary.reports[report.round] = report
logging.info("Report data has been constructed")
def construct_factfile_data(all_missionaries, factfile_data):
"""
Start building the Missionary data using factfile information
"""
logging.info("Constructing factfile data")
# As we may change the order of the columns from time to time and need to
# make this sustainable for any changes, create a dictionary of column
# numbers against the column header text. This should be a single linear
# search to get all the headings.
columns = dict()
for idx, column in enumerate(factfile_data[0]):
columns[column] = idx
for row in factfile_data[1:]:
if len(row) > columns[u'Profile Picture']:
# Basics mandatory for a factfile
try:
missionary = Missionary(row[columns[u'ID (new)']],
row[columns[u'MissionarySecondName']],
row[columns[u'MissionaryFirstName']])
except NotImplementedError:
logging.error("Couldn't create {0} factfile data".format(
row[columns[u'ID (new)']]))
continue
try:
missionary.state = validate_state(
row[columns[u'MissionField State']])
except ValueError:
logging.error("Invalid state for {0}: {1}".format(
row[columns[u'ID (new)']],
row[columns[u'MissionField State']]))
missionary.pic = row[columns[u'Headshot Photo link']]
# Add family and biography
if len(row) > columns[u'Number of Dependents']:
if row[columns[u'Wife / Husband\'s First Name']]:
missionary.spouse = Spouse(
row[columns[u'Wife / Husband\'s First Name']],
row[columns[u'Wife / Husband\'s Second Name']])
for i in range(1, 6):
if row[columns[u'Child ' + str(i) + ' First Name']]:
missionary.children[u'Child ' + str(i)] = Child(
row[columns[u'Child ' + str(i) + ' First Name']],
row[columns[u'Child ' + str(i) + ' DOB']])
# Mission field data
villages = []
for i in range(1, 6):
if (len(row) > columns[u'V' + str(i) + ' B'] and
row[columns[u'V' + str(i)]]):
villages.append(
Village(row[columns[u'V' + str(i)]],
row[columns[u'V' + str(i) + ' N']],
row[columns[u'V' + str(i) + ' B']]))
missionary.villages = villages
all_missionaries[missionary.id] = missionary
logging.info("Factfile data has been constructed")
def add_imgur_profiles(all_missionaries, imgur_imgs):
for miss_id, missionary in all_missionaries.iteritems():
try:
missionary.pic = imgur_imgs[miss_id]
except KeyError:
logging.info('{0} has no Imgur picture'.format(miss_id))
if __name__ == '__main__':
status = main()
sys.exit(status)
| 39.610236 | 107 | 0.553722 |