hexsha (stringlengths 40-40) | size (int64 1-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-239) | max_stars_repo_name (stringlengths 5-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (sequencelengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-239) | max_issues_repo_name (stringlengths 5-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (sequencelengths 1-10) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-239) | max_forks_repo_name (stringlengths 5-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (sequencelengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 1-1.03M) | avg_line_length (float64 1-958k) | max_line_length (int64 1-1.03M) | alphanum_fraction (float64 0-1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
79465a5ca2d9d6567de661268dda2623f62c51b8 | 1,036 | py | Python | train_mineral_shards.py | llSourcell/A-Guide-to-DeepMind-s-StarCraft-AI-Environment- | cd8bd7ac637ddb9bec7e8f7a3e9eee03569fcade | ["Apache-2.0"] | 217 | 2017-09-13T09:59:28.000Z | 2021-11-09T09:08:19.000Z | train_mineral_shards.py | llSourcell/A-Guide-to-DeepMind-s-StarCraft-AI-Environment- | cd8bd7ac637ddb9bec7e8f7a3e9eee03569fcade | ["Apache-2.0"] | 7 | 2017-09-19T14:13:11.000Z | 2019-08-23T01:58:08.000Z | train_mineral_shards.py | llSourcell/A-Guide-to-DeepMind-s-StarCraft-AI-Environment- | cd8bd7ac637ddb9bec7e8f7a3e9eee03569fcade | ["Apache-2.0"] | 86 | 2017-09-13T09:59:31.000Z | 2022-03-07T02:14:13.000Z |
import sys
import gflags as flags
from baselines import deepq
from pysc2.env import sc2_env
from pysc2.lib import actions
import deepq_mineral_shards
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_SELECT_ALL = [0]
_NOT_QUEUED = [0]
step_mul = 8
FLAGS = flags.FLAGS
def main():
FLAGS(sys.argv)
with sc2_env.SC2Env(
"CollectMineralShards",
step_mul=step_mul,
visualize=True) as env:
model = deepq.models.cnn_to_mlp(
convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
hiddens=[256],
dueling=True
)
act = deepq_mineral_shards.learn(
env,
q_func=model,
num_actions=4,
lr=1e-5,
max_timesteps=2000000,
buffer_size=100000,
exploration_fraction=0.5,
exploration_final_eps=0.01,
train_freq=4,
learning_starts=100000,
target_network_update_freq=1000,
gamma=0.99,
prioritized_replay=True
)
act.save("mineral_shards.pkl")
if __name__ == '__main__':
main()
| 19.923077 | 49 | 0.667954 |
79465ad17da4a565a37ca04e82e7732a94d9f54e | 1,647 | py | Python | Gof_script.py | jiaojiaogou/UDSBC | 6f6c2be39c5d1fa718825f63787e28ed8f37dc1a | ["Apache-2.0"] | null | null | null | Gof_script.py | jiaojiaogou/UDSBC | 6f6c2be39c5d1fa718825f63787e28ed8f37dc1a | ["Apache-2.0"] | null | null | null | Gof_script.py | jiaojiaogou/UDSBC | 6f6c2be39c5d1fa718825f63787e28ed8f37dc1a | ["Apache-2.0"] | null | null | null |
## this script is written to calculate the Gof (goodness of fit) of the GS streamflow data
## written by Jiaojiao Gou 2022-05-05
import os
import math
import string
import numpy as np
import pandas as pd
import xarray as xr
from UDSBC.util import filter_nan
from UDSBC.Postprocess import gof_index
## read station position
station = pd.read_csv('./input/gauge_id.csv')['station']
gauge_id = pd.read_csv('./input/gauge_id.csv')['gaugeid']
# read obs
obs = np.empty([480,len(gauge_id)])
obs_file = pd.read_csv("./input/Q_obs.csv")
for i in range(len(gauge_id)):
obs[:,i] = obs_file[station[i]]
# read Qout simulation
Qout = xr.open_dataset('./output/Qbc_month1.nc').load()['qout']
Rivers = xr.open_dataset('./output/Qmon.nc').load()['rivers']
sim = np.empty([480,len(gauge_id)])
for i in range(len(gauge_id)):
k = np.where(Rivers == gauge_id[i])[0][0]
sim[:,i] = Qout[0:480,k]
# cal gof
gof = np.empty([len(gauge_id),10])
for i in range(len(gauge_id)):
s1,o1 = filter_nan(sim[0:480,i], obs[0:480,i]) #0:108 108:228 0:228
if len(o1) == 0:
gof[i,:] = 'NaN'
else:
gof[i,0] = gof_index.pc_bias(s1, o1)
gof[i,1] = gof_index.apb(s1, o1)
gof[i,2] = gof_index.rmse(s1, o1)
gof[i,3] = gof_index.mae(s1, o1)
gof[i,4] = gof_index.bias(s1, o1)
gof[i,5] = gof_index.NS(s1, o1)
gof[i,6] = gof_index.L(s1, o1, N=5)
gof[i,7] = gof_index.correlation(s1, o1)
gof[i,8] = gof_index.index_agreement(s1, o1)
gof[i,9] = gof_index.KGE(s1, o1)[0]
np.savetxt("./output/gof1.csv",gof,delimiter=',')
| 29.945455 | 87 | 0.613236 |
79465b50e9ece70635c3973a2d674f0bba5f9f76 | 884 | py | Python | solenoid/records/migrations/0016_auto_20210716_1724.py | MITLibraries/solenoid | 9e21d95304058ded81267d9ea4030784a1652f1c | ["Apache-2.0"] | null | null | null | solenoid/records/migrations/0016_auto_20210716_1724.py | MITLibraries/solenoid | 9e21d95304058ded81267d9ea4030784a1652f1c | ["Apache-2.0"] | 416 | 2017-05-01T21:45:00.000Z | 2022-02-11T22:28:12.000Z | solenoid/records/migrations/0016_auto_20210716_1724.py | MITLibraries/solenoid | 9e21d95304058ded81267d9ea4030784a1652f1c | ["Apache-2.0"] | null | null | null |
# Generated by Django 3.2.5 on 2021-07-16 17:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('records', '0015_auto_20200403_1856'),
]
operations = [
migrations.AlterField(
model_name='record',
name='acq_method',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='record',
name='doi',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='record',
name='paper_id',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='record',
name='publisher_name',
field=models.CharField(max_length=255),
),
]
| 26 | 63 | 0.563348 |
79465ba06d8047d04b85159d4d84a09cbc9b9a8f | 540 | py | Python | patsy/dependancies/database.py | python-discord/patsy | 159fb05022d7302e7ef5df4ddce108b293a5bc19 | ["MIT"] | 1 | 2022-01-16T21:02:53.000Z | 2022-01-16T21:02:53.000Z | patsy/dependancies/database.py | python-discord/patsy | 159fb05022d7302e7ef5df4ddce108b293a5bc19 | ["MIT"] | null | null | null | patsy/dependancies/database.py | python-discord/patsy | 159fb05022d7302e7ef5df4ddce108b293a5bc19 | ["MIT"] | 2 | 2021-11-07T21:16:02.000Z | 2021-12-05T20:00:45.000Z |
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.future import Engine
from sqlalchemy.orm import sessionmaker
from patsy.settings import CONFIG, DATABASE
engine: Engine = create_async_engine(DATABASE.database_url, echo=CONFIG.debug, future=True)
async def get_session():
"""Yield an engine session, should used as a dependency."""
async_session = sessionmaker(
engine, class_=AsyncSession, expire_on_commit=False
)
async with async_session() as session:
yield session
| 31.764706 | 91 | 0.77037 |
79465c09cf0080010b150384600f1d8ba5334584 | 1,139 | py | Python | tests/test_columns_statistics_api.py | Ramanth/analyticsapi-engines-python-sdk | 93af954e3bd2107146d4914437feb8e27c159741 | ["Apache-2.0"] | 12 | 2019-12-01T10:34:50.000Z | 2022-03-14T00:11:21.000Z | tests/test_columns_statistics_api.py | Ramanth/analyticsapi-engines-python-sdk | 93af954e3bd2107146d4914437feb8e27c159741 | ["Apache-2.0"] | 33 | 2019-12-01T12:14:01.000Z | 2022-03-22T04:50:30.000Z | tests/test_columns_statistics_api.py | Ramanth/analyticsapi-engines-python-sdk | 93af954e3bd2107146d4914437feb8e27c159741 | ["Apache-2.0"] | 1 | 2019-12-01T10:34:52.000Z | 2019-12-01T10:34:52.000Z |
import unittest
from fds.analyticsapi.engines.api.column_statistics_api import ColumnStatisticsApi
from fds.analyticsapi.engines.model.column_statistic import ColumnStatistic
from fds.analyticsapi.engines.model.column_statistic_root import ColumnStatisticRoot
from common_functions import CommonFunctions
class TestColumnStatisticsApi(unittest.TestCase):
def setUp(self):
self.column_statistics_api = ColumnStatisticsApi(
CommonFunctions.build_api_client())
def test_get_column_statistics(self):
response = self.column_statistics_api.get_pa_column_statistics(
_return_http_data_only=False)
self.assertEqual(response[1], 200,
"Response code should be 200 - Success")
self.assertEqual(type(response[0]), ColumnStatisticRoot,
"Response should be of ColumnStatisticRoot type")
self.assertEqual(
type(response[0]['data'][list(response[0]['data'].keys())[0]]),
ColumnStatistic,
"Response should be of ColumnStatistic type"
)
if __name__ == '__main__':
unittest.main()
| 36.741935 | 84 | 0.705882 |
79465deacd60b28afaf9536b5f351b27e3c77d78 | 1,198 | py | Python | tests/utils.py | yvdlima/python-zeep | aae3def4385b0f8922e0e83b9cdcd68b2263f739 | ["MIT"] | 3 | 2017-04-01T16:05:52.000Z | 2019-07-26T14:32:26.000Z | tests/utils.py | yvdlima/python-zeep | aae3def4385b0f8922e0e83b9cdcd68b2263f739 | ["MIT"] | null | null | null | tests/utils.py | yvdlima/python-zeep | aae3def4385b0f8922e0e83b9cdcd68b2263f739 | ["MIT"] | 2 | 2020-11-18T09:49:46.000Z | 2021-07-08T14:02:03.000Z |
import six
from lxml import etree
from six import binary_type, string_types
def load_xml(xml):
parser = etree.XMLParser(
remove_blank_text=True, remove_comments=True, resolve_entities=False
)
return etree.fromstring(xml.strip(), parser=parser)
def assert_nodes_equal(result, expected):
def _convert_node(node):
if isinstance(node, (string_types, binary_type)):
return load_xml(node)
return node
# assert node_1 == node_2
result = etree.tostring(_convert_node(result), pretty_print=True)
expected = etree.tostring(_convert_node(expected), pretty_print=True)
if six.PY3:
result = result.decode("utf-8")
expected = expected.decode("utf-8")
assert result == expected
def render_node(element, value):
node = etree.Element("document")
element.render(node, value)
return node
class DummyTransport(object):
def __init__(self):
self._items = {}
def bind(self, url, node):
self._items[url] = node
def load(self, url):
data = self._items[url]
if isinstance(data, (binary_type, string_types)):
return data
return etree.tostring(data)
| 25.489362 | 76 | 0.66611 |
79465e34f57ecd7f2b619b619a9f12dc1e5a296b | 927 | bzl | Python | antlir/bzl/linux/time.bzl | zeroxoneb/antlir | 811d88965610d16a5c85d831d317f087797ca732 | ["MIT"] | 28 | 2020-08-11T16:22:46.000Z | 2022-03-04T15:41:52.000Z | antlir/bzl/linux/time.bzl | zeroxoneb/antlir | 811d88965610d16a5c85d831d317f087797ca732 | ["MIT"] | 137 | 2020-08-11T16:07:49.000Z | 2022-02-27T10:59:05.000Z | antlir/bzl/linux/time.bzl | zeroxoneb/antlir | 811d88965610d16a5c85d831d317f087797ca732 | ["MIT"] | 10 | 2020-09-10T00:01:28.000Z | 2022-03-08T18:00:28.000Z |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
load("@bazel_skylib//lib:paths.bzl", "paths")
load("//antlir/bzl:image.bzl", "image")
load("//antlir/bzl/image/feature:defs.bzl", "feature")
def _timezone(zone, timezone_dir = "/usr/share/zoneinfo"):
"""
Build Antlir image features to support setting the timezone to the provided
`zone`.
In the unlikley event that the target `image.layer` this is used on has a
non-standard timezone dir, override this via the `timezone_dir` param.
"""
dest = "/etc/localtime"
return [
feature.remove(
dest,
must_exist = False,
),
image.ensure_file_symlink(
paths.join(timezone_dir, zone),
dest,
),
]
time = struct(
timezone = _timezone,
)
| 26.485714 | 79 | 0.63754 |
79465e3e6cd337377c7e3cb0590c9eafacc5ac58 | 68 | py | Python | maodevice/utils/__init__.py | mao-wfs/mao-device | 1c1ac88067da307d210234bebd7091fe5dc982c9 | ["MIT"] | null | null | null | maodevice/utils/__init__.py | mao-wfs/mao-device | 1c1ac88067da307d210234bebd7091fe5dc982c9 | ["MIT"] | null | null | null | maodevice/utils/__init__.py | mao-wfs/mao-device | 1c1ac88067da307d210234bebd7091fe5dc982c9 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from . import decorators
from . import misc
| 17 | 24 | 0.647059 |
79465f3febc1d595e17b8b509dd721aab00aa9ce | 3,605 | py | Python | app/route/stories/processor.py | LifeLaboratory/rosbank_backend | b19aed99b7084c14f0827933b7f28d6e51de92bd | ["MIT"] | 2 | 2019-12-06T23:22:33.000Z | 2019-12-08T07:18:31.000Z | app/route/stories/processor.py | LifeLaboratory/rosbank_backend | b19aed99b7084c14f0827933b7f28d6e51de92bd | ["MIT"] | 56 | 2019-12-06T17:54:07.000Z | 2019-12-08T04:55:24.000Z | app/route/stories/processor.py | LifeLaboratory/rosbank_backend | b19aed99b7084c14f0827933b7f28d6e51de92bd | ["MIT"] | 1 | 2019-12-08T05:04:46.000Z | 2019-12-08T05:04:46.000Z |
from app.route.stories.provider import Provider
from app.api.base import base_name as names
from app.api.helper import get_id_user_by_profile, get_id_user_by_profiles
from app.api.helper import get_admins, get_all_profile
from app.route.notifications.processor import add_notification
def publicate_storie(args):
"""
    Publish a story
:param args:
:return:
"""
provider = Provider()
if isinstance(args.get(names.ID_PROFILE), list):
id_users = get_id_user_by_profiles(args)
else:
id_users = get_id_user_by_profile(args)
for id_user in id_users:
args[names.ID_USER] = id_user.get(names.ID_USER)
provider.publicate_storie(args)
return names.OK
def insert_stories(args):
"""
    Create a story
:param args:
:return:
"""
provider = Provider()
answer = provider.insert_stories(args)[0]
args[names.ID_STORIES] = answer.get(names.ID_STORIES)
args[names.POSITION] = 0
for i in range(len(args.get(names.URL))):
args[names.URL] = args.get(names.URL)[i]
args[names.DESCRIPTION] = args.get(names.DESCRIPTION)[i] if args.get(names.DESCRIPTION) else ''
provider.insert_image(args)
args[names.POSITION] += 1
if args.get(names.TYPE) == '2':
args[names.ID_PROFILE] = 4
args[names.STATUS] = 'open'
add_notification(args)
return names.OK
def stories_profile(args):
"""
    Get stories for a profile
:param args:
:return:
"""
provider = Provider()
answer = provider.stories_profile(args)
return answer
def update_stories(args):
"""
    Update a story
:param args:
:return:
"""
provider = Provider()
provider.update_stories(args)
provider.delete_images(args)
args[names.POSITION] = 0
for i in range(len(args.get(names.URL))):
args[names.URL] = args.get(names.URL)[i]
args[names.DESCRIPTION] = args.get(names.DESCRIPTION)[i]
provider.insert_image(args)
args[names.POSITION] += 1
return names.OK
def change_status(args):
"""
    Change the status of a user's action
:param args:
:return:
"""
provider = Provider()
status = provider.select_status(args)
args[names.IS_OPEN] = args[names.STATUS] == 'open'
args[names.IS_VIEW] = args[names.STATUS] == 'view'
if args.get(names.ID_NOTIFICATION) is not None:
args[names.ACTIVE] = False if args[names.IS_VIEW] else True
provider.update_notifications_user(args)
if args.get(names.STATUS) is not None:
if status:
provider.update_status(args)
else:
provider.insert_status(args)
if args.get(names.IS_LIKE):
provider.update_like(args)
return names.OK
def get_stories_list(args):
"""
    Get stories for a user
:param args:
:return:
"""
provider = Provider()
answer = provider.get_stories_list(args)
return answer
def get_admins_ids(args):
"""
    The method returns a list of administrator ids
:param args:
:return:
"""
result = []
admins_ids = get_admins(args)
for admin_id in admins_ids:
result.append(admin_id.get(names.ID_USER))
return result
def get_all_stories(args):
"""
    Get all stories for the admin panel
:param args:
:return:
"""
provider = Provider()
args['left_string'] = ''
if args.get(names.ID_USER) and int(args.get(names.ID_USER)) in get_admins_ids(args):
args['left_string'] = 'left'
answer = provider.get_all_stories(args)
return answer
| 26.703704 | 103 | 0.654092 |
79465f6b393a46f3822c35aee53bf6b4a97567a4 | 2,191 | py | Python | autotf/ensemble/ML/example/test_stacking/classification.py | DAIM-ML/autotf | 3f82d858f49c27d5ecb624cee555fb8fd47bf067 | ["BSD-3-Clause"] | 8 | 2018-03-07T06:58:16.000Z | 2019-01-30T07:49:44.000Z | autotf/ensemble/ML/example/test_stacking/classification.py | DAIM-ML/autotf | 3f82d858f49c27d5ecb624cee555fb8fd47bf067 | ["BSD-3-Clause"] | null | null | null | autotf/ensemble/ML/example/test_stacking/classification.py | DAIM-ML/autotf | 3f82d858f49c27d5ecb624cee555fb8fd47bf067 | ["BSD-3-Clause"] | 1 | 2018-03-31T09:06:12.000Z | 2018-03-31T09:06:12.000Z |
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, log_loss
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from ensemble import Stacking
import time
import warnings
warnings.filterwarnings(module='sklearn*', action='ignore', category=DeprecationWarning)
digits = load_digits()
X, y = digits.data, digits.target
# Make train/test split
# As usual in machine learning task we have X_train, y_train, and X_test
X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=True, test_size=0.2, random_state=0)
models = [
ExtraTreesClassifier(random_state=0, n_jobs=-1,
n_estimators=100, max_depth=3),
RandomForestClassifier(random_state=0, n_jobs=-1,
n_estimators=100, max_depth=3),
XGBClassifier(random_state=0, n_jobs=-1, learning_rate=0.1,
n_estimators=100, max_depth=3)
]
meta_model = XGBClassifier(random_state=0, n_jobs=1, learning_rate=0.1,
n_estimators=100, max_depth=3)
start_time = time.time()
ensemble = Stacking(X_train, y_train, X_test, regression=False, bagged_pred=True,
needs_proba=False, save_dir=None, metric=accuracy_score,
n_folds=4, stratified=True, shuffle=True,
random_state=0, verbose=1)
start_time = time.time()
ensemble.add(models, propagate_features=[0, 1, 2, 3])
print ('process(add) took %fs!' % (time.time() - start_time))
start_time = time.time()
y_pred = ensemble.add_meta(meta_model)
print ('process(add_meta) took %fs!' % (time.time() - start_time))
print('Final prediction score: [%.8f]' % accuracy_score(y_test, y_pred))
# '''Compare with the randomforest'''
#
# print("randomforest")
# model = RandomForestClassifier(random_state=0, n_jobs=-1,
# n_estimators=200, max_depth=3)
# model.fit(X_train,y_train)
# y_pred = model.predict(X_test)
# print('Final prediction score: [%.8f]' % accuracy_score(y_test, y_pred))
| 38.438596 | 103 | 0.687814 |
79465fc3ff06f91a3655d64857e571f4b869a4bd | 3,670 | py | Python | xylearn/feature_extraction/tfidf.py | thautwarm/xylearn | ade885eb145a750cfe6c7c30896806cee6dfef59 | ["MIT"] | null | null | null | xylearn/feature_extraction/tfidf.py | thautwarm/xylearn | ade885eb145a750cfe6c7c30896806cee6dfef59 | ["MIT"] | null | null | null | xylearn/feature_extraction/tfidf.py | thautwarm/xylearn | ade885eb145a750cfe6c7c30896806cee6dfef59 | ["MIT"] | null | null | null |
from sklearn.feature_extraction.text import TfidfTransformer
from collections import Counter
from typing import List
from sklearn.feature_extraction import DictVectorizer
from nltk.tokenize import word_tokenize
import nltk
import numpy as np
import xython as xy
import pandas as pd
import string
Word = str
Doc = List[Word]
ReadOnly = property
_EmptySet = set()
class Word2Vec:
def __init__(self, sparse=True, sort=True, use_tf_idf=True,
punctuation=set(string.punctuation),
stop_words=nltk.corpus.stopwords.words('english')):
self.abandon = set.union(_EmptySet, *(each for each in (punctuation, stop_words) if each))
if not self.abandon:
self.abandon = None
self.vectorizer = DictVectorizer(sparse=sparse, sort=sort)
self.tf_idf = TfidfTransformer() if use_tf_idf else None
self._fitted = False
self._transform = None
self._hold_data = None
def fit(self, documents: List[Doc], hold_data=False):
if self.abandon:
documents = map | xy.partial(documents) | xy.call(
lambda _: filter(lambda word: word not in self.abandon, _))
data = [Counter(doc) for doc in documents]
self.vectorizer.fit(data)
if self.tf_idf:
data = self.vectorizer.transform(data)
self.tf_idf.fit(data)
self._transform = xy.and_then(self.vectorizer.transform, self.tf_idf.transform)
else:
self._transform = self.vectorizer.transform
if hold_data:
self._hold_data = data
self._fitted = True
def transform(self, documents: List[Doc]):
if not self.fitted:
self.fit(documents, hold_data=True)
data = self.tf_idf.transform(self._hold_data)
del self._hold_data
return data
return self._transform([Counter(doc) for doc in documents])
@ReadOnly
def fitted(self):
return self._fitted
@ReadOnly
def feature_names(self):
return np.array(self.vectorizer.get_feature_names())
def counter(documents: List[Doc]):
return [Counter(doc) for doc in documents]
if __name__ == '__main__':
corpus = """The ACT originally consisted of four tests: English, Mathematics, Social Studies, and Natural Sciences.
In 1989 however, the Social Studies test was changed into a Reading section (which included a Social Studies subsection) and the Natural Sciences test was renamed the Science Reasoning test, with more emphasis on problem solving skills.[12]
In February 2005, an optional Writing test was added to the ACT, mirroring changes to the SAT that took place later in March of the same year.
In 2013, ACT announced that students would be able to take the ACT by computer starting in the spring of 2015. The test will continue to be offered in the paper format for schools that are not ready to transition to computer testing.[13]
The ACT has seen a gradual increase in the number of test takers since its inception, and in 2011 the ACT surpassed the SAT for the first time in total test takers; that year, 1,666,017 students took the ACT and 1,664,479 students took the SAT.[14]
All four-year colleges and universities in the U.S. accept the ACT,[15] but different institutions place different emphases on standardized tests such as the ACT, compared to other factors of evaluation such as class rank, GPA, and extracurricular activities.
""".splitlines()
documents = [word_tokenize(doc) for doc in corpus]
w2v = Word2Vec()
print(pd.DataFrame(w2v.transform(documents).toarray(),
columns=w2v.feature_names))
| 41.704545 | 259 | 0.70109 |
7946604a9b73e03a999050d2dcb6f2cdd8dddc1b | 3,820 | py | Python | common/src/stack/command/stack/commands/config/host/interface/__init__.py | knutsonchris/stacki | 33087dd5fa311984a66ccecfeee6f9c2c25f665d | ["BSD-3-Clause"] | null | null | null | common/src/stack/command/stack/commands/config/host/interface/__init__.py | knutsonchris/stacki | 33087dd5fa311984a66ccecfeee6f9c2c25f665d | ["BSD-3-Clause"] | null | null | null | common/src/stack/command/stack/commands/config/host/interface/__init__.py | knutsonchris/stacki | 33087dd5fa311984a66ccecfeee6f9c2c25f665d | ["BSD-3-Clause"] | null | null | null |
# @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
# @rocks@
# Copyright (c) 2000 - 2010 The Regents of the University of California
# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org
# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt
# @rocks@
import stack.commands
from stack.exception import ArgUnique
class Command(stack.commands.config.host.command):
"""
!!! STACKIQ INTERNAL COMMAND ONLY !!!
Configures host interfaces in the database.
This command should only be called from a post section in a kickstart
file.
<arg type='string' name='host'>
Host name of machine
</arg>
<param type='string' name='interface'>
Interface names (e.g., "eth0"). If multiple interfaces are supplied,
then they must be comma-separated.
</param>
<param type='string' name='mac'>
MAC addresses for the interfaces. If multiple MACs are supplied,
then they must be comma-separated.
</param>
<param type='string' name='module'>
Driver modules to be loaded for the interfaces. If multiple modules
are supplied, then they must be comma-separated.
</param>
<param type='string' name='flag'>
Flags for the interfaces. If flags for multiple interfaces
are supplied, then they must be comma-separated.
</param>
"""
def run(self, params, args):
(interface, mac, module, flag) = self.fillParams([
('interface', None),
('mac', None),
('module', None),
('flag', None) ])
hosts = self.getHostnames(args)
if len(hosts) != 1:
raise ArgUnique(self, 'host')
host = hosts[0]
sync_config = 0
discovered_macs = []
if mac:
macs = mac.split(',')
else:
macs = []
if interface:
interfaces = interface.split(',')
else:
interfaces = []
if module:
modules = module.split(',')
else:
modules = []
if flag:
flags = flag.split(',')
else:
flags = []
for i in range(0, len(macs)):
a = (macs[i], )
if len(interfaces) > i:
a += (interfaces[i], )
else:
a += ('', )
if len(modules) > i:
a += (modules[i], )
else:
a += ('', )
if len(flags) > i:
a += (flags[i], )
else:
a += ('', )
discovered_macs.append(a)
pre_config = self.command('list.host.interface', [host])
#
# First, assign the correct names to the mac addresses
#
for (mac, interface, module, ks) in discovered_macs:
rows = self.db.select("""mac from networks
where mac = %s """,mac)
if rows:
self.command('set.host.interface.interface',
[host, 'interface=%s' % interface, 'mac=%s' % mac])
else:
continue
if module:
self.command('set.host.interface.module',
[host, 'interface=%s' % interface, 'module=%s' % module])
#
# Add any missing/new interfaces to the database
#
for (mac, interface, module, ks) in discovered_macs:
rows = self.db.select("""mac from networks
where mac = %s """, mac)
if not rows:
# Check if the interface exists without a MAC.
r = self.db.select("""device from networks, nodes
where device = %s and networks.node = nodes.id
and nodes.name=%s""", (interface, host))
if not r:
# If it does not, add the interface before setting MAC addresses
self.command('add.host.interface',
[host, 'interface=%s' % interface])
# Update the MAC address of the interface
self.command('set.host.interface.mac', [host, 'interface=%s' % interface, 'mac=%s' % mac ])
# Update the kernel module if that information is sent back
if module:
self.command('set.host.interface.module',
[host, 'interface=%s' % interface, 'module=%s' % module])
post_config = self.command('list.host.interface', [host])
if pre_config != post_config:
self.command('sync.config')
| 25.298013 | 95 | 0.647644 |
7946622b0e6c60284fb737fc8a80441154e5abfd | 8,499 | py | Python | lace/test_geometry.py | metabolize/lace | 75cee6a118932cd027692d6cfe36b3726b3a4a5c | [
"BSD-2-Clause"
] | null | null | null | lace/test_geometry.py | metabolize/lace | 75cee6a118932cd027692d6cfe36b3726b3a4a5c | [
"BSD-2-Clause"
] | 6 | 2018-07-12T22:22:14.000Z | 2019-09-28T15:47:38.000Z | lace/test_geometry.py | metabolize/lace | 75cee6a118932cd027692d6cfe36b3726b3a4a5c | [
"BSD-2-Clause"
] | null | null | null | import unittest
import numpy as np
from bltest import attr
import vg
from lace.cache import sc, vc
from lace.mesh import Mesh
class TestGeometryMixin(unittest.TestCase):
debug = False
@attr('missing_assets')
def test_cut_across_axis(self):
original_mesh = Mesh(filename=sc('s3://bodylabs-assets/example_meshes/average_female.obj'))
# Set up
mesh = original_mesh.copy()
# Sanity check
np.testing.assert_almost_equal(mesh.v[:, 0].min(), -0.3668)
np.testing.assert_almost_equal(mesh.v[:, 0].max(), 0.673871)
# Act
mesh.cut_across_axis(0, minval=-0.1)
# Test
np.testing.assert_almost_equal(mesh.v[:, 0].min(), -0.1, decimal=1)
np.testing.assert_almost_equal(mesh.v[:, 0].max(), 0.673871)
# Visualize
if self.debug:
mesh.show()
# Set up
mesh = original_mesh.copy()
# Sanity check
np.testing.assert_almost_equal(mesh.v[:, 0].min(), -0.3668)
np.testing.assert_almost_equal(mesh.v[:, 0].max(), 0.673871)
# Act
mesh.cut_across_axis(0, maxval=0.1)
# Test
np.testing.assert_almost_equal(mesh.v[:, 0].min(), -0.3668)
np.testing.assert_almost_equal(mesh.v[:, 0].max(), 0.1, decimal=1)
# Visualize
if self.debug:
mesh.show()
# Set up
mesh = original_mesh.copy()
# Sanity check
np.testing.assert_almost_equal(mesh.v[:, 0].min(), -0.3668)
np.testing.assert_almost_equal(mesh.v[:, 0].max(), 0.673871)
# Act
mesh.cut_across_axis(0, minval=-0.1, maxval=0.1)
# Test
np.testing.assert_almost_equal(mesh.v[:, 0].min(), -0.1, decimal=1)
np.testing.assert_almost_equal(mesh.v[:, 0].max(), 0.1, decimal=1)
# Visualize
if self.debug:
mesh.show()
@attr('missing_assets')
def test_cut_across_axis_by_percentile(self):
original_mesh = Mesh(filename=sc('s3://bodylabs-assets/example_meshes/average_female.obj'))
# Set up
mesh = original_mesh.copy()
# Sanity check
np.testing.assert_almost_equal(mesh.v[:, 0].min(), -0.3668)
np.testing.assert_almost_equal(mesh.v[:, 0].max(), 0.673871)
# Act
mesh.cut_across_axis_by_percentile(0, 25, 40)
# Test
np.testing.assert_almost_equal(mesh.v[:, 0].min(), 0.03, decimal=1)
np.testing.assert_almost_equal(mesh.v[:, 0].max(), 0.10, decimal=1)
# Visualize
if self.debug:
mesh.show()
def test_first_blip(self):
'''
        Create a triangular prism, the base of which is an equilateral
triangle with one side along the x axis and its third point on the
+y axis.
Take three origins and vectors whose first_blip should be the same
vertex.
'''
import math
from lace import shapes
points = np.array([
[1, 0, 0],
[0, math.sqrt(1.25), 0],
[-1, 0, 0],
])
prism = shapes.create_triangular_prism(*points, height=4)
test_cases = [
{'origin': [0, -0.5, 0], 'initial_direction': [-1, 0.3, 0]},
{'origin': [-1.1, -0.5, 0], 'initial_direction': [0, 1, 0]},
{'origin': [-1, 1, 0], 'initial_direction': [-1, -1, 0]},
]
# TODO This is a little sloppy. Because flatten_dim=2,
# [-1, 0, 0] and [-1, 0, 4] are equally valid results.
expected_v = [-1, 0, 0]
for test_case in test_cases:
np.testing.assert_array_equal(
prism.first_blip(2, test_case['origin'], test_case['initial_direction']),
expected_v
)
def test_first_blip_ignores_squash_axis(self):
'''
The above test is nice, but since the object is a prism, it's the same along
the z-axis -- and hence isn't really testing squash_axis. Here's another test
with a different setup which tries to get at it.
'''
import math
from lace import shapes
points = np.array([
[1, 0, 0],
[0, math.sqrt(1.25), 0],
[-1, 0, 0],
])
prism = shapes.create_triangular_prism(*points, height=4)
test_cases = [
{'origin': [-1, 0, -1], 'initial_direction': [0, 1, 0]},
]
expected_v = [0, math.sqrt(1.25), 0]
for test_case in test_cases:
np.testing.assert_array_equal(
prism.first_blip(0, test_case['origin'], test_case['initial_direction']),
expected_v
)
def test_reorient_is_noop_on_empty_mesh(self):
mesh = Mesh()
mesh.reorient(up=[0, 1, 0], look=[0, 0, 1])
def test_scale_is_noop_on_empty_mesh(self):
mesh = Mesh()
mesh.scale(7)
def test_translate_is_noop_on_empty_mesh(self):
mesh = Mesh()
mesh.translate(1)
def test_centroid_is_undefined_on_empty_mesh(self):
mesh = Mesh()
with self.assertRaises(ValueError) as ctx:
mesh.centroid # pylint: disable=pointless-statement
self.assertEqual(
str(ctx.exception),
'Mesh has no vertices; centroid is not defined'
)
def test_bounding_box_is_undefined_on_empty_mesh(self):
mesh = Mesh()
with self.assertRaises(ValueError) as ctx:
mesh.bounding_box # pylint: disable=pointless-statement
self.assertEqual(
str(ctx.exception),
'Mesh has no vertices; bounding box is not defined'
)
def test_recenter_over_floor_raises_expected_on_empty_mesh(self):
mesh = Mesh()
with self.assertRaises(ValueError) as ctx:
mesh.recenter_over_floor()
self.assertEqual(
str(ctx.exception),
'Mesh has no vertices; centroid is not defined'
)
def test_predict_body_units_m(self):
mesh = Mesh()
mesh.v = np.array([[1.5, 1.0, 0.5], [-1.5, 0.5, 1.5]])
self.assertEqual(mesh.predict_body_units(), 'm')
def test_predict_body_units_cm(self):
mesh = Mesh()
mesh.v = np.array([[150, 100, 50], [-150, 50, 150]])
self.assertEqual(mesh.predict_body_units(), 'cm')
@attr('missing_assets')
def test_flip(self):
raw_box = Mesh(vc('/unittest/serialization/obj/test_box_simple.obj'))
box = Mesh(v=raw_box.v, f=raw_box.f)
box.reset_normals()
original_v = box.v.copy()
original_vn = box.vn.copy()
original_f = box.f.copy()
box.flip(axis=0)
box.reset_normals()
self.assertEqual(box.f.shape, original_f.shape)
self.assertEqual(box.v.shape, original_v.shape)
for face, orig_face in zip(box.f, original_f):
self.assertNotEqual(list(face), list(orig_face))
self.assertEqual(set(face), set(orig_face))
np.testing.assert_array_almost_equal(box.v[:, 0], np.negative(original_v[:, 0]))
np.testing.assert_array_almost_equal(box.v[:, 1], original_v[:, 1])
np.testing.assert_array_almost_equal(box.v[:, 2], original_v[:, 2])
np.testing.assert_array_almost_equal(box.vn[:, 0], np.negative(original_vn[:, 0]))
np.testing.assert_array_almost_equal(box.vn[:, 1], original_vn[:, 1])
np.testing.assert_array_almost_equal(box.vn[:, 2], original_vn[:, 2])
def test_reorient_faces_using_normals(self):
import math
from lace import shapes
from polliwog.tri.surface_normals import surface_normal
points = np.array([
[1, 0, 0],
[0, math.sqrt(1.25), 0],
[-1, 0, 0],
])
prism = shapes.create_triangular_prism(*points, height=4)
correct_faces = prism.f.copy()
# Generate normals that are slightly off in random directions.
prism.fn = vg.normalize(
surface_normal(prism.v[prism.f]) + \
0.05 * np.random.random(len(prism.f)*3).reshape(len(prism.f), 3))
# Flip a few of the faces.
n_to_flip = 4
to_flip = np.random.permutation(len(prism.f))[:n_to_flip]
prism.flip_faces(to_flip)
# Confidence check.
np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, prism.f, correct_faces)
prism.reorient_faces_using_normals()
# Confidence check.
np.testing.assert_array_equal(prism.f, correct_faces)
| 34.408907 | 103 | 0.589716 |
7946633eb5861a20e275a53d4f2d0f62498192d3 | 3,739 | py | Python | eeve/taskinfo.py | vMarcelino/eeve | 7dcfa17d34480f5c120ce963680babffff8ab412 | ["Apache-2.0"] | 1 | 2019-10-11T18:42:48.000Z | 2019-10-11T18:42:48.000Z | eeve/taskinfo.py | vMarcelino/eeve | 7dcfa17d34480f5c120ce963680babffff8ab412 | ["Apache-2.0"] | null | null | null | eeve/taskinfo.py | vMarcelino/eeve | 7dcfa17d34480f5c120ce963680babffff8ab412 | ["Apache-2.0"] | 1 | 2019-10-11T18:42:49.000Z | 2019-10-11T18:42:49.000Z |
from eeve.variable import VariableGroup, Variable
from dataclasses import dataclass
from typing import List
@dataclass
class TaskInfo:
actions: 'List[Action]'
global_variables: VariableGroup
task_variables: VariableGroup
local_variables: VariableGroup
current_action_index: int = 0
increment_action_index: bool = True
def get_next_actions(self):
"""Returns an enumerated list of actions starting from the task
with index 'current_action_index + 1'
Returns:
List[Tuple[int, Action]] -- Enumerated list of next actions
"""
return enumerate(self.actions[self.current_action_index + 1:], self.current_action_index + 1)
def get_var(self, var_name: str):
"""Returns a variable reference if var_name starts with var
or the variable value if it does not. In case var_name is vars
(either var$[$[$]]vars or [$[$[$]]]vars), the returned value is a dict in the format
{var_name:Variable(var_name, var_value), ...} for the former or a dict in the format
{var_name:var_value, ...} for the latter.
Scopes:
$ or empty -> local scope
$$ -> task scope
$$$ -> global scope
Arguments:
var_name {str} -- the variable name with format [var][$[$[$]]]var_name
Returns:
Union[Any, dict] -- The requested variable info (read docstring above)
"""
original_var_name = var_name
is_var_ref = var_name.startswith('var$')
class Scopes:
Global = self.global_variables
Task = self.task_variables
Local = self.local_variables
scope: VariableGroup = Scopes.Local
# remove 'var' prefix if is variable reference
if is_var_ref:
var_name = var_name[3:]
# define scope and remove all prefixing '$', being variable reference or just variable value
if var_name.startswith('$$$'):
scope = Scopes.Global
var_name = var_name[3:]
elif var_name.startswith('$$'):
scope = Scopes.Task
var_name = var_name[2:]
elif var_name.startswith('$'):
scope = Scopes.Local
var_name = var_name[1:]
# return variable ref or value
if is_var_ref:
if var_name == 'vars':
return scope.vars
else:
return scope.get_or_create(var_name)
else:
if var_name == 'vars':
return scope.to_kwargs()
else:
return scope.get(var_name, original_var_name)
def has_var(self, var_name: str) -> bool:
"""Checks if a given variable exists.
the 'var' prefix is ignored
Scopes:
$ or empty -> local scope
$$ -> task scope
$$$ -> global scope
Arguments:
var_name {str} -- the name of the variable
Returns:
bool -- Does the variable exist?
"""
is_var = var_name.startswith('var$')
class Scopes:
Global = self.global_variables
Task = self.task_variables
Local = self.local_variables
scope = Scopes.Local
if is_var:
var_name = var_name[3:]
if var_name.startswith('$$$'):
scope = Scopes.Global
var_name = var_name[3:]
elif var_name.startswith('$$'):
scope = Scopes.Task
var_name = var_name[2:]
elif var_name.startswith('$'):
scope = Scopes.Local
var_name = var_name[1:]
return var_name in scope.vars
| 31.957265 | 101 | 0.564589 |
7946633eb5861a20e275a53d4f2d0f62498192d3 | 107,420 | py | Python | ambiente_virtual/Lib/site-packages/sqlalchemy/dialects/mssql/base.py | PI-UNIVESP-Penapolis/PRODEA | 1ced58f52bace8b6de0de3c6516b9fb7231da09c | [
"MIT"
] | 1 | 2021-09-22T13:14:37.000Z | 2021-09-22T13:14:37.000Z | ambiente_virtual/Lib/site-packages/sqlalchemy/dialects/mssql/base.py | PI-UNIVESP-Penapolis/PRODEA | 1ced58f52bace8b6de0de3c6516b9fb7231da09c | [
"MIT"
] | null | null | null | ambiente_virtual/Lib/site-packages/sqlalchemy/dialects/mssql/base.py | PI-UNIVESP-Penapolis/PRODEA | 1ced58f52bace8b6de0de3c6516b9fb7231da09c | [
"MIT"
] | null | null | null | # mssql/base.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql
:name: Microsoft SQL Server
:full_support: 2017
:normal_support: 2012+
:best_effort: 2005+
.. _mssql_external_dialects:
External Dialects
-----------------
In addition to the above DBAPI layers with native SQLAlchemy support, there
are third-party dialects for other DBAPI layers that are compatible
with SQL Server. See the "External Dialects" list on the
:ref:`dialect_toplevel` page.
.. _mssql_identity:
Auto Increment Behavior / IDENTITY Columns
------------------------------------------
SQL Server provides so-called "auto incrementing" behavior using the
``IDENTITY`` construct, which can be placed on any single integer column in a
table. SQLAlchemy considers ``IDENTITY`` within its default "autoincrement"
behavior for an integer primary key column, described at
:paramref:`_schema.Column.autoincrement`. This means that by default,
the first
integer primary key column in a :class:`_schema.Table`
will be considered to be the
identity column - unless it is associated with a :class:`.Sequence` - and will
generate DDL as such::
from sqlalchemy import Table, MetaData, Column, Integer
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
The above example will generate DDL as:
.. sourcecode:: sql
CREATE TABLE t (
id INTEGER NOT NULL IDENTITY,
x INTEGER NULL,
PRIMARY KEY (id)
)
For the case where this default generation of ``IDENTITY`` is not desired,
specify ``False`` for the :paramref:`_schema.Column.autoincrement` flag,
on the first integer primary key column::
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('x', Integer))
m.create_all(engine)
To add the ``IDENTITY`` keyword to a non-primary key column, specify
``True`` for the :paramref:`_schema.Column.autoincrement` flag on the desired
:class:`_schema.Column` object, and ensure that
:paramref:`_schema.Column.autoincrement`
is set to ``False`` on any integer primary key column::
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('x', Integer, autoincrement=True))
m.create_all(engine)
.. versionchanged:: 1.4 Added :class:`_schema.Identity` construct
in a :class:`_schema.Column` to specify the start and increment
parameters of an IDENTITY. These replace
the use of the :class:`.Sequence` object in order to specify these values.
.. deprecated:: 1.4
The ``mssql_identity_start`` and ``mssql_identity_increment`` parameters
to :class:`_schema.Column` are deprecated and should we replaced by
an :class:`_schema.Identity` object. Specifying both ways of configuring
an IDENTITY will result in a compile error.
These options are also no longer returned as part of the
``dialect_options`` key in :meth:`_reflection.Inspector.get_columns`.
Use the information in the ``identity`` key instead.
.. deprecated:: 1.3
The use of :class:`.Sequence` to specify IDENTITY characteristics is
deprecated and will be removed in a future release. Please use
the :class:`_schema.Identity` object parameters
:paramref:`_schema.Identity.start` and
:paramref:`_schema.Identity.increment`.
.. versionchanged:: 1.4 Removed the ability to use a :class:`.Sequence`
object to modify IDENTITY characteristics. :class:`.Sequence` objects
now only manipulate true T-SQL SEQUENCE types.
.. note::
There can only be one IDENTITY column on the table. When using
``autoincrement=True`` to enable the IDENTITY keyword, SQLAlchemy does not
guard against multiple columns specifying the option simultaneously. The
SQL Server database will instead reject the ``CREATE TABLE`` statement.
.. note::
An INSERT statement which attempts to provide a value for a column that is
marked with IDENTITY will be rejected by SQL Server. In order for the
value to be accepted, a session-level option "SET IDENTITY_INSERT" must be
enabled. The SQLAlchemy SQL Server dialect will perform this operation
automatically when using a core :class:`_expression.Insert`
construct; if the
execution specifies a value for the IDENTITY column, the "IDENTITY_INSERT"
option will be enabled for the span of that statement's invocation. However,
this scenario is not high performing and should not be relied upon for
normal use. If a table doesn't actually require IDENTITY behavior in its
integer primary key column, the keyword should be disabled when creating
the table by ensuring that ``autoincrement=False`` is set.
Controlling "Start" and "Increment"
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Specific control over the "start" and "increment" values for
the ``IDENTITY`` generator are provided using the
:paramref:`_schema.Identity.start` and :paramref:`_schema.Identity.increment`
parameters passed to the :class:`_schema.Identity` object::
from sqlalchemy import Table, Integer, Column, Identity
test = Table(
'test', metadata,
Column(
'id',
Integer,
primary_key=True,
Identity(start=100, increment=10)
),
Column('name', String(20))
)
The CREATE TABLE for the above :class:`_schema.Table` object would be:
.. sourcecode:: sql
CREATE TABLE test (
id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
name VARCHAR(20) NULL,
)
.. note::
The :class:`_schema.Identity` object supports many other parameter in
addition to ``start`` and ``increment``. These are not supported by
SQL Server and will be ignored when generating the CREATE TABLE ddl.
.. versionchanged:: 1.3.19 The :class:`_schema.Identity` object is
now used to affect the
``IDENTITY`` generator for a :class:`_schema.Column` under SQL Server.
Previously, the :class:`.Sequence` object was used. As SQL Server now
supports real sequences as a separate construct, :class:`.Sequence` will be
functional in the normal way starting from SQLAlchemy version 1.4.
INSERT behavior
^^^^^^^^^^^^^^^^
Handling of the ``IDENTITY`` column at INSERT time involves two key
techniques. The most common is being able to fetch the "last inserted value"
for a given ``IDENTITY`` column, a process which SQLAlchemy performs
implicitly in many cases, most importantly within the ORM.
The process for fetching this value has several variants:
* In the vast majority of cases, RETURNING is used in conjunction with INSERT
statements on SQL Server in order to get newly generated primary key values:
.. sourcecode:: sql
INSERT INTO t (x) OUTPUT inserted.id VALUES (?)
* When RETURNING is not available or has been disabled via
``implicit_returning=False``, either the ``scope_identity()`` function or
the ``@@identity`` variable is used; behavior varies by backend:
* when using PyODBC, the phrase ``; select scope_identity()`` will be
appended to the end of the INSERT statement; a second result set will be
fetched in order to receive the value. Given a table as::
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer),
implicit_returning=False)
an INSERT will look like:
.. sourcecode:: sql
INSERT INTO t (x) VALUES (?); select scope_identity()
* Other dialects such as pymssql will call upon
``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT
statement. If the flag ``use_scope_identity=False`` is passed to
:func:`_sa.create_engine`,
the statement ``SELECT @@identity AS lastrowid``
is used instead.
A table that contains an ``IDENTITY`` column will prohibit an INSERT statement
that refers to the identity column explicitly. The SQLAlchemy dialect will
detect when an INSERT construct, created using a core
:func:`_expression.insert`
construct (not a plain string SQL), refers to the identity column, and
in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert
statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
execution. Given this example::
m = MetaData()
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
with engine.begin() as conn:
conn.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2})
The above column will be created with IDENTITY, however the INSERT statement
we emit is specifying explicit values. In the echo output we can see
how SQLAlchemy handles this:
.. sourcecode:: sql
CREATE TABLE t (
id INTEGER NOT NULL IDENTITY(1,1),
x INTEGER NULL,
PRIMARY KEY (id)
)
COMMIT
SET IDENTITY_INSERT t ON
INSERT INTO t (id, x) VALUES (?, ?)
((1, 1), (2, 2))
SET IDENTITY_INSERT t OFF
COMMIT
This
is an auxiliary use case suitable for testing and bulk insert scenarios.
SEQUENCE support
----------------
The :class:`.Sequence` object now creates "real" sequences, i.e.,
``CREATE SEQUENCE``. To provide compatibility with other dialects,
:class:`.Sequence` defaults to a start value of 1, even though the
T-SQL default is -9223372036854775808.
.. versionadded:: 1.4.0
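As a brief, hedged sketch (the table, column and sequence names below are
illustrative only and do not come from the text above), a :class:`.Sequence`
with an explicit start value can be attached to an integer column in the
usual way; on SQL Server this emits ``CREATE SEQUENCE`` and uses the sequence
to generate primary key values::

    from sqlalchemy import Column, Integer, MetaData, Sequence, Table

    m = MetaData()
    t = Table(
        "t", m,
        # the Sequence supplies the column default; start=1 mirrors the
        # cross-dialect default described above rather than the T-SQL default
        Column("id", Integer, Sequence("t_id_seq", start=1), primary_key=True),
        Column("x", Integer),
    )
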
MAX on VARCHAR / NVARCHAR
-------------------------
SQL Server supports the special string "MAX" within the
:class:`_types.VARCHAR` and :class:`_types.NVARCHAR` datatypes,
to indicate "maximum length possible". The dialect currently handles this as
a length of "None" in the base type, rather than supplying a
dialect-specific version of these types, so that a base type
specified such as ``VARCHAR(None)`` can assume "unlengthed" behavior on
more than one backend without using dialect-specific types.
To build a SQL Server VARCHAR or NVARCHAR with MAX length, use None::
my_table = Table(
'my_table', metadata,
Column('my_data', VARCHAR(None)),
Column('my_n_data', NVARCHAR(None))
)
Collation Support
-----------------
Character collations are supported by the base string types,
specified by the string argument "collation"::
from sqlalchemy import VARCHAR
Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
When such a column is associated with a :class:`_schema.Table`, the
CREATE TABLE statement for this column will yield::
login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
LIMIT/OFFSET Support
--------------------
MSSQL has added support for LIMIT / OFFSET as of SQL Server 2012, via the
"OFFSET n ROWS" and "FETCH NEXT n ROWS" clauses. SQLAlchemy supports these
syntaxes automatically if SQL Server 2012 or greater is detected.
.. versionchanged:: 1.4 support added for SQL Server "OFFSET n ROWS" and
"FETCH NEXT n ROWS" syntax.
For statements that specify only LIMIT and no OFFSET, all versions of SQL
Server support the TOP keyword. This syntax is used for all SQL Server
versions when no OFFSET clause is present. A statement such as::
select(some_table).limit(5)
will render similarly to::
SELECT TOP 5 col1, col2.. FROM table
For versions of SQL Server prior to SQL Server 2012, a statement that uses
LIMIT and OFFSET, or just OFFSET alone, will be rendered using the
``ROW_NUMBER()`` window function. A statement such as::
select(some_table).order_by(some_table.c.col3).limit(5).offset(10)
will render similarly to::
SELECT anon_1.col1, anon_1.col2 FROM (SELECT col1, col2,
ROW_NUMBER() OVER (ORDER BY col3) AS
mssql_rn FROM table WHERE t.x = :x_1) AS
anon_1 WHERE mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1
Note that when using LIMIT and/or OFFSET, whether using the older
or newer SQL Server syntaxes, the statement must have an ORDER BY as well,
else a :class:`.CompileError` is raised.
.. _mssql_isolation_level:
Transaction Isolation Level
---------------------------
All SQL Server dialects support setting of transaction isolation level
both via a dialect-specific parameter
:paramref:`_sa.create_engine.isolation_level`
accepted by :func:`_sa.create_engine`,
as well as the :paramref:`.Connection.execution_options.isolation_level`
argument as passed to
:meth:`_engine.Connection.execution_options`.
This feature works by issuing the
command ``SET TRANSACTION ISOLATION LEVEL <level>`` for
each new connection.
To set isolation level using :func:`_sa.create_engine`::
engine = create_engine(
"mssql+pyodbc://scott:tiger@ms_2008",
isolation_level="REPEATABLE READ"
)
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="READ COMMITTED"
)
Valid values for ``isolation_level`` include:
* ``AUTOCOMMIT`` - pyodbc / pymssql-specific
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``SNAPSHOT`` - specific to SQL Server
.. versionadded:: 1.2 added AUTOCOMMIT isolation level setting
.. seealso::
:ref:`dbapi_autocommit`
Nullability
-----------
MSSQL has support for three levels of column nullability. The default
nullability allows nulls and is explicit in the CREATE TABLE
construct::
name VARCHAR(20) NULL
If ``nullable=None`` is specified then no specification is made. In
other words the database's configured default is used. This will
render::
name VARCHAR(20)
If ``nullable`` is ``True`` or ``False`` then the column will be
``NULL`` or ``NOT NULL`` respectively.
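A minimal sketch showing the three settings side by side (the table and
column names here are illustrative only)::

    t = Table(
        "t", metadata,
        Column("a", String(20), nullable=True),   # renders "a VARCHAR(20) NULL"
        Column("b", String(20), nullable=False),  # renders "b VARCHAR(20) NOT NULL"
        Column("c", String(20), nullable=None),   # renders "c VARCHAR(20)"
    )
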
Date / Time Handling
--------------------
DATE and TIME are supported. Bind parameters are converted
to datetime.datetime() objects as required by most MSSQL drivers,
and results are processed from strings if needed.
The DATE and TIME types are not available for MSSQL 2005 and
previous - if a server version below 2008 is detected, DDL
for these types will be issued as DATETIME.
.. _mssql_large_type_deprecation:
Large Text/Binary Type Deprecation
----------------------------------
Per
`SQL Server 2012/2014 Documentation <https://technet.microsoft.com/en-us/library/ms187993.aspx>`_,
the ``NTEXT``, ``TEXT`` and ``IMAGE`` datatypes are to be removed from SQL
Server in a future release. SQLAlchemy normally relates these types to the
:class:`.UnicodeText`, :class:`_expression.TextClause` and
:class:`.LargeBinary` datatypes.
In order to accommodate this change, a new flag ``deprecate_large_types``
is added to the dialect, which will be automatically set based on detection
of the server version in use, if not otherwise set by the user. The
behavior of this flag is as follows:
* When this flag is ``True``, the :class:`.UnicodeText`,
:class:`_expression.TextClause` and
:class:`.LargeBinary` datatypes, when used to render DDL, will render the
types ``NVARCHAR(max)``, ``VARCHAR(max)``, and ``VARBINARY(max)``,
respectively. This is a new behavior as of the addition of this flag.
* When this flag is ``False``, the :class:`.UnicodeText`,
:class:`_expression.TextClause` and
:class:`.LargeBinary` datatypes, when used to render DDL, will render the
types ``NTEXT``, ``TEXT``, and ``IMAGE``,
respectively. This is the long-standing behavior of these types.
* The flag begins with the value ``None``, before a database connection is
established. If the dialect is used to render DDL without the flag being
set, it is interpreted the same as ``False``.
* On first connection, the dialect detects if SQL Server version 2012 or
greater is in use; if the flag is still at ``None``, it sets it to ``True``
or ``False`` based on whether 2012 or greater is detected.
* The flag can be set to either ``True`` or ``False`` when the dialect
is created, typically via :func:`_sa.create_engine`::
eng = create_engine("mssql+pymssql://user:pass@host/db",
deprecate_large_types=True)
* Complete control over whether the "old" or "new" types are rendered is
available in all SQLAlchemy versions by using the UPPERCASE type objects
instead: :class:`_types.NVARCHAR`, :class:`_types.VARCHAR`,
:class:`_types.VARBINARY`, :class:`_types.TEXT`, :class:`_mssql.NTEXT`,
:class:`_mssql.IMAGE`
will always remain fixed and always output exactly that
type.
.. versionadded:: 1.0.0
.. _multipart_schema_names:
Multipart Schema Names
----------------------
SQL Server schemas sometimes require multiple parts to their "schema"
qualifier, that is, including the database name and owner name as separate
tokens, such as ``mydatabase.dbo.some_table``. These multipart names can be set
at once using the :paramref:`_schema.Table.schema` argument of
:class:`_schema.Table`::
Table(
"some_table", metadata,
Column("q", String(50)),
schema="mydatabase.dbo"
)
When performing operations such as table or component reflection, a schema
argument that contains a dot will be split into separate
"database" and "owner" components in order to correctly query the SQL
Server information schema tables, as these two values are stored separately.
Additionally, when rendering the schema name for DDL or SQL, the two
components will be quoted separately for case sensitive names and other
special characters. Given an argument as below::
Table(
"some_table", metadata,
Column("q", String(50)),
schema="MyDataBase.dbo"
)
The above schema would be rendered as ``[MyDataBase].dbo``, and also in
reflection, would be reflected using "dbo" as the owner and "MyDataBase"
as the database name.
To control how the schema name is broken into database / owner,
specify brackets (which in SQL Server are quoting characters) in the name.
Below, the "owner" will be considered as ``MyDataBase.dbo`` and the
"database" will be None::
Table(
"some_table", metadata,
Column("q", String(50)),
schema="[MyDataBase.dbo]"
)
To individually specify both database and owner name with special characters
or embedded dots, use two sets of brackets::
Table(
"some_table", metadata,
Column("q", String(50)),
schema="[MyDataBase.Period].[MyOwner.Dot]"
)
.. versionchanged:: 1.2 the SQL Server dialect now treats brackets as
identifier delimiters splitting the schema into separate database
and owner tokens, to allow dots within either name itself.
.. _legacy_schema_rendering:
Legacy Schema Mode
------------------
Very old versions of the MSSQL dialect introduced the behavior such that a
schema-qualified table would be auto-aliased when used in a
SELECT statement; given a table::
account_table = Table(
'account', metadata,
Column('id', Integer, primary_key=True),
Column('info', String(100)),
schema="customer_schema"
)
this legacy mode of rendering would assume that "customer_schema.account"
would not be accepted by all parts of the SQL statement, as illustrated
below::
>>> eng = create_engine("mssql+pymssql://mydsn", legacy_schema_aliasing=True)
>>> print(account_table.select().compile(eng))
SELECT account_1.id, account_1.info
FROM customer_schema.account AS account_1
This mode of behavior is now off by default, as it appears to have served
no purpose; however in the case that legacy applications rely upon it,
it is available using the ``legacy_schema_aliasing`` argument to
:func:`_sa.create_engine` as illustrated above.
.. versionchanged:: 1.1 the ``legacy_schema_aliasing`` flag introduced
in version 1.0.5 to allow disabling of legacy mode for schemas now
defaults to False.
.. deprecated:: 1.4
The ``legacy_schema_aliasing`` flag is now
deprecated and will be removed in a future release.
.. _mssql_indexes:
Clustered Index Support
-----------------------
The MSSQL dialect supports clustered indexes (and primary keys) via the
``mssql_clustered`` option. This option is available to :class:`.Index`,
:class:`.UniqueConstraint`. and :class:`.PrimaryKeyConstraint`.
To generate a clustered index::
Index("my_index", table.c.x, mssql_clustered=True)
which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
To generate a clustered primary key use::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=True))
which will render the table, for example, as::
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY CLUSTERED (x, y))
Similarly, we can generate a clustered unique constraint using::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True),
)
To explicitly request a non-clustered primary key (for example, when
a separate clustered index is desired), use::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=False))
which will render the table, for example, as::
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY NONCLUSTERED (x, y))
.. versionchanged:: 1.1 the ``mssql_clustered`` option now defaults
to None, rather than False. ``mssql_clustered=False`` now explicitly
renders the NONCLUSTERED clause, whereas None omits the CLUSTERED
clause entirely, allowing SQL Server defaults to take effect.
MSSQL-Specific Index Options
-----------------------------
In addition to clustering, the MSSQL dialect supports other special options
for :class:`.Index`.
INCLUDE
^^^^^^^
The ``mssql_include`` option renders INCLUDE(colname) for the given string
names::
Index("my_index", table.c.x, mssql_include=['y'])
would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
.. _mssql_index_where:
Filtered Indexes
^^^^^^^^^^^^^^^^
The ``mssql_where`` option renders WHERE(condition) for the given string
names::
Index("my_index", table.c.x, mssql_where=table.c.x > 10)
would render the index as ``CREATE INDEX my_index ON table (x) WHERE x > 10``.
.. versionadded:: 1.3.4
Index ordering
^^^^^^^^^^^^^^
Index ordering is available via functional expressions, such as::
Index("my_index", table.c.x.desc())
would render the index as ``CREATE INDEX my_index ON table (x DESC)``
.. seealso::
:ref:`schema_indexes_functional`
Compatibility Levels
--------------------
MSSQL supports the notion of setting compatibility levels at the
database level. This allows, for instance, running a database that
is compatible with SQL2000 while running on a SQL2005 database
server. ``server_version_info`` will always return the database
server version information (in this case SQL2005) and not the
compatibility level information. Because of this, if running under
a backwards compatibility mode, SQLAlchemy may attempt to use T-SQL
statements that cannot be parsed by the database server.
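
As an illustration only (using the placeholder DSN from the other examples in
this document), the version reported by the server itself, as opposed to the
compatibility level, can be inspected from the dialect once a connection has
been made::

    engine = create_engine("mssql+pymssql://mydsn")
    with engine.connect() as conn:
        # e.g. a tuple such as (10, 50, 1600, 1) for the actual server
        print(conn.dialect.server_version_info)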
Triggers
--------
SQLAlchemy by default uses OUTPUT INSERTED to get at newly
generated primary key values via IDENTITY columns or other
server side defaults. MS-SQL does not
allow the usage of OUTPUT INSERTED on tables that have triggers.
To disable the usage of OUTPUT INSERTED on a per-table basis,
specify ``implicit_returning=False`` for each :class:`_schema.Table`
which has triggers::
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
# ...,
implicit_returning=False
)
Declarative form::
class MyClass(Base):
# ...
__table_args__ = {'implicit_returning':False}
This option can also be specified engine-wide using the
``implicit_returning=False`` argument on :func:`_sa.create_engine`.
.. _mssql_rowcount_versioning:
Rowcount Support / ORM Versioning
---------------------------------
The SQL Server drivers may have limited ability to return the number
of rows updated from an UPDATE or DELETE statement.
As of this writing, the PyODBC driver is not able to return a rowcount when
OUTPUT INSERTED is used. This impacts the SQLAlchemy ORM's versioning feature
in many cases where server-side value generators are in use: while the
versioning operations can succeed, the ORM cannot always check that an UPDATE
or DELETE statement matched the number of rows expected, which is how it
verifies that the version identifier matched. When this condition occurs, a
warning will be emitted but the operation will proceed.
The use of OUTPUT INSERTED can be disabled by setting the
:paramref:`_schema.Table.implicit_returning` flag to ``False`` on a particular
:class:`_schema.Table`, which in declarative looks like::
class MyTable(Base):
__tablename__ = 'mytable'
id = Column(Integer, primary_key=True)
stuff = Column(String(10))
timestamp = Column(TIMESTAMP(), default=text('DEFAULT'))
__mapper_args__ = {
'version_id_col': timestamp,
'version_id_generator': False,
}
__table_args__ = {
'implicit_returning': False
}
Enabling Snapshot Isolation
---------------------------
SQL Server has a default transaction
isolation mode that locks entire tables, and causes even mildly concurrent
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support. This is accomplished via the
following ALTER DATABASE commands executed at the SQL prompt::
ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON
Background on SQL Server snapshot isolation is available at
https://msdn.microsoft.com/en-us/library/ms175095.aspx.
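
Once snapshot isolation is enabled for the database, it can also be selected
on a per-engine basis, as the ``isolation_level`` parameter accepted by this
dialect includes ``"SNAPSHOT"``; a brief sketch using the placeholder DSN
from earlier examples::

    engine = create_engine(
        "mssql+pymssql://mydsn", isolation_level="SNAPSHOT"
    )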
""" # noqa
import codecs
import datetime
import operator
import re
from . import information_schema as ischema
from .json import JSON
from .json import JSONIndexType
from .json import JSONPathType
from ... import exc
from ... import Identity
from ... import schema as sa_schema
from ... import Sequence
from ... import sql
from ... import text
from ... import types as sqltypes
from ... import util
from ...engine import cursor as _cursor
from ...engine import default
from ...engine import reflection
from ...sql import coercions
from ...sql import compiler
from ...sql import elements
from ...sql import expression
from ...sql import func
from ...sql import quoted_name
from ...sql import roles
from ...sql import util as sql_util
from ...types import BIGINT
from ...types import BINARY
from ...types import CHAR
from ...types import DATE
from ...types import DATETIME
from ...types import DECIMAL
from ...types import FLOAT
from ...types import INTEGER
from ...types import NCHAR
from ...types import NUMERIC
from ...types import NVARCHAR
from ...types import SMALLINT
from ...types import TEXT
from ...types import VARCHAR
from ...util import compat
from ...util import update_wrapper
from ...util.langhelpers import public_factory
# https://sqlserverbuilds.blogspot.com/
MS_2017_VERSION = (14,)
MS_2016_VERSION = (13,)
MS_2014_VERSION = (12,)
MS_2012_VERSION = (11,)
MS_2008_VERSION = (10,)
MS_2005_VERSION = (9,)
MS_2000_VERSION = (8,)
RESERVED_WORDS = set(
[
"add",
"all",
"alter",
"and",
"any",
"as",
"asc",
"authorization",
"backup",
"begin",
"between",
"break",
"browse",
"bulk",
"by",
"cascade",
"case",
"check",
"checkpoint",
"close",
"clustered",
"coalesce",
"collate",
"column",
"commit",
"compute",
"constraint",
"contains",
"containstable",
"continue",
"convert",
"create",
"cross",
"current",
"current_date",
"current_time",
"current_timestamp",
"current_user",
"cursor",
"database",
"dbcc",
"deallocate",
"declare",
"default",
"delete",
"deny",
"desc",
"disk",
"distinct",
"distributed",
"double",
"drop",
"dump",
"else",
"end",
"errlvl",
"escape",
"except",
"exec",
"execute",
"exists",
"exit",
"external",
"fetch",
"file",
"fillfactor",
"for",
"foreign",
"freetext",
"freetexttable",
"from",
"full",
"function",
"goto",
"grant",
"group",
"having",
"holdlock",
"identity",
"identity_insert",
"identitycol",
"if",
"in",
"index",
"inner",
"insert",
"intersect",
"into",
"is",
"join",
"key",
"kill",
"left",
"like",
"lineno",
"load",
"merge",
"national",
"nocheck",
"nonclustered",
"not",
"null",
"nullif",
"of",
"off",
"offsets",
"on",
"open",
"opendatasource",
"openquery",
"openrowset",
"openxml",
"option",
"or",
"order",
"outer",
"over",
"percent",
"pivot",
"plan",
"precision",
"primary",
"print",
"proc",
"procedure",
"public",
"raiserror",
"read",
"readtext",
"reconfigure",
"references",
"replication",
"restore",
"restrict",
"return",
"revert",
"revoke",
"right",
"rollback",
"rowcount",
"rowguidcol",
"rule",
"save",
"schema",
"securityaudit",
"select",
"session_user",
"set",
"setuser",
"shutdown",
"some",
"statistics",
"system_user",
"table",
"tablesample",
"textsize",
"then",
"to",
"top",
"tran",
"transaction",
"trigger",
"truncate",
"tsequal",
"union",
"unique",
"unpivot",
"update",
"updatetext",
"use",
"user",
"values",
"varying",
"view",
"waitfor",
"when",
"where",
"while",
"with",
"writetext",
]
)
class REAL(sqltypes.REAL):
__visit_name__ = "REAL"
def __init__(self, **kw):
# REAL is a synonym for FLOAT(24) on SQL server.
# it is only accepted as the word "REAL" in DDL, the numeric
# precision value is not allowed to be present
kw.setdefault("precision", 24)
super(REAL, self).__init__(**kw)
class TINYINT(sqltypes.Integer):
__visit_name__ = "TINYINT"
# MSSQL DATE/TIME types have varied behavior, sometimes returning
# strings. MSDate/TIME check for everything, and always
# filter bind parameters into datetime objects (required by pyodbc,
# not sure about other dialects).
class _MSDate(sqltypes.Date):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
_reg = re.compile(r"(\d+)-(\d+)-(\d+)")
def result_processor(self, dialect, coltype):
def process(value):
if isinstance(value, datetime.datetime):
return value.date()
elif isinstance(value, util.string_types):
m = self._reg.match(value)
if not m:
raise ValueError(
"could not parse %r as a date value" % (value,)
)
return datetime.date(*[int(x or 0) for x in m.groups()])
else:
return value
return process
class TIME(sqltypes.TIME):
def __init__(self, precision=None, **kwargs):
self.precision = precision
super(TIME, self).__init__()
__zero_date = datetime.date(1900, 1, 1)
def bind_processor(self, dialect):
def process(value):
if isinstance(value, datetime.datetime):
value = datetime.datetime.combine(
self.__zero_date, value.time()
)
elif isinstance(value, datetime.time):
"""issue #5339
per: https://github.com/mkleehammer/pyodbc/wiki/Tips-and-Tricks-by-Database-Platform#time-columns
pass TIME value as string
""" # noqa
value = str(value)
return value
return process
_reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?")
def result_processor(self, dialect, coltype):
def process(value):
if isinstance(value, datetime.datetime):
return value.time()
elif isinstance(value, util.string_types):
m = self._reg.match(value)
if not m:
raise ValueError(
"could not parse %r as a time value" % (value,)
)
return datetime.time(*[int(x or 0) for x in m.groups()])
else:
return value
return process
_MSTime = TIME
class _DateTimeBase(object):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
class _MSDateTime(_DateTimeBase, sqltypes.DateTime):
pass
class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = "SMALLDATETIME"
class DATETIME2(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = "DATETIME2"
def __init__(self, precision=None, **kw):
super(DATETIME2, self).__init__(**kw)
self.precision = precision
class DATETIMEOFFSET(_DateTimeBase, sqltypes.DateTime):
__visit_name__ = "DATETIMEOFFSET"
def __init__(self, precision=None, **kw):
super(DATETIMEOFFSET, self).__init__(**kw)
self.precision = precision
class _UnicodeLiteral(object):
def literal_processor(self, dialect):
def process(value):
value = value.replace("'", "''")
if dialect.identifier_preparer._double_percents:
value = value.replace("%", "%%")
return "N'%s'" % value
return process
class _MSUnicode(_UnicodeLiteral, sqltypes.Unicode):
pass
class _MSUnicodeText(_UnicodeLiteral, sqltypes.UnicodeText):
pass
class TIMESTAMP(sqltypes._Binary):
"""Implement the SQL Server TIMESTAMP type.
    Note this is **completely different** from the SQL Standard
TIMESTAMP type, which is not supported by SQL Server. It
is a read-only datatype that does not support INSERT of values.
.. versionadded:: 1.2
.. seealso::
:class:`_mssql.ROWVERSION`
"""
__visit_name__ = "TIMESTAMP"
# expected by _Binary to be present
length = None
def __init__(self, convert_int=False):
"""Construct a TIMESTAMP or ROWVERSION type.
:param convert_int: if True, binary integer values will
be converted to integers on read.
.. versionadded:: 1.2
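
        A minimal usage sketch; the table and column names here are
        illustrative only::

            from sqlalchemy import Column, MetaData, Table
            from sqlalchemy.dialects.mssql import TIMESTAMP

            metadata = MetaData()
            t = Table(
                "my_table",
                metadata,
                Column("ts", TIMESTAMP(convert_int=True)),
            )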
"""
self.convert_int = convert_int
def result_processor(self, dialect, coltype):
super_ = super(TIMESTAMP, self).result_processor(dialect, coltype)
if self.convert_int:
def process(value):
value = super_(value)
if value is not None:
# https://stackoverflow.com/a/30403242/34549
value = int(codecs.encode(value, "hex"), 16)
return value
return process
else:
return super_
class ROWVERSION(TIMESTAMP):
"""Implement the SQL Server ROWVERSION type.
The ROWVERSION datatype is a SQL Server synonym for the TIMESTAMP
datatype, however current SQL Server documentation suggests using
ROWVERSION for new datatypes going forward.
The ROWVERSION datatype does **not** reflect (e.g. introspect) from the
database as itself; the returned datatype will be
:class:`_mssql.TIMESTAMP`.
This is a read-only datatype that does not support INSERT of values.
.. versionadded:: 1.2
.. seealso::
:class:`_mssql.TIMESTAMP`
"""
__visit_name__ = "ROWVERSION"
class NTEXT(sqltypes.UnicodeText):
"""MSSQL NTEXT type, for variable-length unicode text up to 2^30
characters."""
__visit_name__ = "NTEXT"
class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary):
"""The MSSQL VARBINARY type.
This type is present to support "deprecate_large_types" mode where
either ``VARBINARY(max)`` or IMAGE is rendered. Otherwise, this type
object is redundant vs. :class:`_types.VARBINARY`.
.. versionadded:: 1.0.0
.. seealso::
:ref:`mssql_large_type_deprecation`
"""
__visit_name__ = "VARBINARY"
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = "IMAGE"
class XML(sqltypes.Text):
"""MSSQL XML type.
This is a placeholder type for reflection purposes that does not include
any Python-side datatype support. It also does not currently support
additional arguments, such as "CONTENT", "DOCUMENT",
"xml_schema_collection".
.. versionadded:: 1.1.11
"""
__visit_name__ = "XML"
class BIT(sqltypes.Boolean):
"""MSSQL BIT type.
Both pyodbc and pymssql return values from BIT columns as
Python <class 'bool'> so just subclass Boolean.
"""
__visit_name__ = "BIT"
class MONEY(sqltypes.TypeEngine):
__visit_name__ = "MONEY"
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = "SMALLMONEY"
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class SQL_VARIANT(sqltypes.TypeEngine):
__visit_name__ = "SQL_VARIANT"
class TryCast(sql.elements.Cast):
"""Represent a SQL Server TRY_CAST expression."""
__visit_name__ = "try_cast"
stringify_dialect = "mssql"
def __init__(self, *arg, **kw):
"""Create a TRY_CAST expression.
:class:`.TryCast` is a subclass of SQLAlchemy's :class:`.Cast`
construct, and works in the same way, except that the SQL expression
rendered is "TRY_CAST" rather than "CAST"::
from sqlalchemy import select
from sqlalchemy import Numeric
from sqlalchemy.dialects.mssql import try_cast
stmt = select(
try_cast(product_table.c.unit_price, Numeric(10, 4))
)
The above would render::
SELECT TRY_CAST (product_table.unit_price AS NUMERIC(10, 4))
FROM product_table
.. versionadded:: 1.3.7
"""
super(TryCast, self).__init__(*arg, **kw)
try_cast = public_factory(TryCast, ".dialects.mssql.try_cast")
# old names.
MSDateTime = _MSDateTime
MSDate = _MSDate
MSReal = REAL
MSTinyInteger = TINYINT
MSTime = TIME
MSSmallDateTime = SMALLDATETIME
MSDateTime2 = DATETIME2
MSDateTimeOffset = DATETIMEOFFSET
MSText = TEXT
MSNText = NTEXT
MSString = VARCHAR
MSNVarchar = NVARCHAR
MSChar = CHAR
MSNChar = NCHAR
MSBinary = BINARY
MSVarBinary = VARBINARY
MSImage = IMAGE
MSBit = BIT
MSMoney = MONEY
MSSmallMoney = SMALLMONEY
MSUniqueIdentifier = UNIQUEIDENTIFIER
MSVariant = SQL_VARIANT
ischema_names = {
"int": INTEGER,
"bigint": BIGINT,
"smallint": SMALLINT,
"tinyint": TINYINT,
"varchar": VARCHAR,
"nvarchar": NVARCHAR,
"char": CHAR,
"nchar": NCHAR,
"text": TEXT,
"ntext": NTEXT,
"decimal": DECIMAL,
"numeric": NUMERIC,
"float": FLOAT,
"datetime": DATETIME,
"datetime2": DATETIME2,
"datetimeoffset": DATETIMEOFFSET,
"date": DATE,
"time": TIME,
"smalldatetime": SMALLDATETIME,
"binary": BINARY,
"varbinary": VARBINARY,
"bit": BIT,
"real": REAL,
"image": IMAGE,
"xml": XML,
"timestamp": TIMESTAMP,
"money": MONEY,
"smallmoney": SMALLMONEY,
"uniqueidentifier": UNIQUEIDENTIFIER,
"sql_variant": SQL_VARIANT,
}
class MSTypeCompiler(compiler.GenericTypeCompiler):
def _extend(self, spec, type_, length=None):
"""Extend a string-type declaration with standard SQL
COLLATE annotations.
"""
if getattr(type_, "collation", None):
collation = "COLLATE %s" % type_.collation
else:
collation = None
if not length:
length = type_.length
if length:
spec = spec + "(%s)" % length
return " ".join([c for c in (spec, collation) if c is not None])
def visit_FLOAT(self, type_, **kw):
precision = getattr(type_, "precision", None)
if precision is None:
return "FLOAT"
else:
return "FLOAT(%(precision)s)" % {"precision": precision}
def visit_TINYINT(self, type_, **kw):
return "TINYINT"
def visit_TIME(self, type_, **kw):
precision = getattr(type_, "precision", None)
if precision is not None:
return "TIME(%s)" % precision
else:
return "TIME"
def visit_TIMESTAMP(self, type_, **kw):
return "TIMESTAMP"
def visit_ROWVERSION(self, type_, **kw):
return "ROWVERSION"
def visit_datetime(self, type_, **kw):
if type_.timezone:
return self.visit_DATETIMEOFFSET(type_, **kw)
else:
return self.visit_DATETIME(type_, **kw)
def visit_DATETIMEOFFSET(self, type_, **kw):
precision = getattr(type_, "precision", None)
if precision is not None:
return "DATETIMEOFFSET(%s)" % type_.precision
else:
return "DATETIMEOFFSET"
def visit_DATETIME2(self, type_, **kw):
precision = getattr(type_, "precision", None)
if precision is not None:
return "DATETIME2(%s)" % precision
else:
return "DATETIME2"
def visit_SMALLDATETIME(self, type_, **kw):
return "SMALLDATETIME"
def visit_unicode(self, type_, **kw):
return self.visit_NVARCHAR(type_, **kw)
def visit_text(self, type_, **kw):
if self.dialect.deprecate_large_types:
return self.visit_VARCHAR(type_, **kw)
else:
return self.visit_TEXT(type_, **kw)
def visit_unicode_text(self, type_, **kw):
if self.dialect.deprecate_large_types:
return self.visit_NVARCHAR(type_, **kw)
else:
return self.visit_NTEXT(type_, **kw)
def visit_NTEXT(self, type_, **kw):
return self._extend("NTEXT", type_)
def visit_TEXT(self, type_, **kw):
return self._extend("TEXT", type_)
def visit_VARCHAR(self, type_, **kw):
return self._extend("VARCHAR", type_, length=type_.length or "max")
def visit_CHAR(self, type_, **kw):
return self._extend("CHAR", type_)
def visit_NCHAR(self, type_, **kw):
return self._extend("NCHAR", type_)
def visit_NVARCHAR(self, type_, **kw):
return self._extend("NVARCHAR", type_, length=type_.length or "max")
def visit_date(self, type_, **kw):
if self.dialect.server_version_info < MS_2008_VERSION:
return self.visit_DATETIME(type_, **kw)
else:
return self.visit_DATE(type_, **kw)
def visit_time(self, type_, **kw):
if self.dialect.server_version_info < MS_2008_VERSION:
return self.visit_DATETIME(type_, **kw)
else:
return self.visit_TIME(type_, **kw)
def visit_large_binary(self, type_, **kw):
if self.dialect.deprecate_large_types:
return self.visit_VARBINARY(type_, **kw)
else:
return self.visit_IMAGE(type_, **kw)
def visit_IMAGE(self, type_, **kw):
return "IMAGE"
def visit_XML(self, type_, **kw):
return "XML"
def visit_VARBINARY(self, type_, **kw):
return self._extend("VARBINARY", type_, length=type_.length or "max")
def visit_boolean(self, type_, **kw):
return self.visit_BIT(type_)
def visit_BIT(self, type_, **kw):
return "BIT"
def visit_JSON(self, type_, **kw):
# this is a bit of a break with SQLAlchemy's convention of
# "UPPERCASE name goes to UPPERCASE type name with no modification"
return self._extend("NVARCHAR", type_, length="max")
def visit_MONEY(self, type_, **kw):
return "MONEY"
def visit_SMALLMONEY(self, type_, **kw):
return "SMALLMONEY"
def visit_UNIQUEIDENTIFIER(self, type_, **kw):
return "UNIQUEIDENTIFIER"
def visit_SQL_VARIANT(self, type_, **kw):
return "SQL_VARIANT"
class MSExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
_select_lastrowid = False
_lastrowid = None
_rowcount = None
_result_strategy = None
def _opt_encode(self, statement):
if not self.dialect.supports_unicode_statements:
encoded = self.dialect._encoder(statement)[0]
else:
encoded = statement
if self.compiled and self.compiled.schema_translate_map:
rst = self.compiled.preparer._render_schema_translates
encoded = rst(encoded, self.compiled.schema_translate_map)
return encoded
def pre_exec(self):
"""Activate IDENTITY_INSERT if needed."""
if self.isinsert:
tbl = self.compiled.compile_state.dml_table
id_column = tbl._autoincrement_column
insert_has_identity = (id_column is not None) and (
not isinstance(id_column.default, Sequence)
)
if insert_has_identity:
compile_state = self.compiled.compile_state
self._enable_identity_insert = (
id_column.key in self.compiled_parameters[0]
) or (
compile_state._dict_parameters
and (
id_column.key in compile_state._dict_parameters
or id_column in compile_state._dict_parameters
)
)
else:
self._enable_identity_insert = False
self._select_lastrowid = (
not self.compiled.inline
and insert_has_identity
and not self.compiled.returning
and not self._enable_identity_insert
and not self.executemany
)
if self._enable_identity_insert:
self.root_connection._cursor_execute(
self.cursor,
self._opt_encode(
"SET IDENTITY_INSERT %s ON"
% self.identifier_preparer.format_table(tbl)
),
(),
self,
)
def post_exec(self):
"""Disable IDENTITY_INSERT if enabled."""
conn = self.root_connection
if self.isinsert or self.isupdate or self.isdelete:
self._rowcount = self.cursor.rowcount
if self._select_lastrowid:
if self.dialect.use_scope_identity:
conn._cursor_execute(
self.cursor,
"SELECT scope_identity() AS lastrowid",
(),
self,
)
else:
conn._cursor_execute(
self.cursor, "SELECT @@identity AS lastrowid", (), self
)
# fetchall() ensures the cursor is consumed without closing it
row = self.cursor.fetchall()[0]
self._lastrowid = int(row[0])
elif (
self.isinsert or self.isupdate or self.isdelete
) and self.compiled.returning:
self.cursor_fetch_strategy = (
_cursor.FullyBufferedCursorFetchStrategy(
self.cursor,
self.cursor.description,
self.cursor.fetchall(),
)
)
if self._enable_identity_insert:
conn._cursor_execute(
self.cursor,
self._opt_encode(
"SET IDENTITY_INSERT %s OFF"
% self.identifier_preparer.format_table(
self.compiled.compile_state.dml_table
)
),
(),
self,
)
def get_lastrowid(self):
return self._lastrowid
@property
def rowcount(self):
if self._rowcount is not None:
return self._rowcount
else:
return self.cursor.rowcount
def handle_dbapi_exception(self, e):
if self._enable_identity_insert:
try:
self.cursor.execute(
self._opt_encode(
"SET IDENTITY_INSERT %s OFF"
% self.identifier_preparer.format_table(
self.compiled.compile_state.dml_table
)
)
)
except Exception:
pass
def get_result_cursor_strategy(self, result):
if self._result_strategy:
return self._result_strategy
else:
return super(MSExecutionContext, self).get_result_cursor_strategy(
result
)
def fire_sequence(self, seq, type_):
return self._execute_scalar(
(
"SELECT NEXT VALUE FOR %s"
% self.identifier_preparer.format_sequence(seq)
),
type_,
)
def get_insert_default(self, column):
if (
isinstance(column, sa_schema.Column)
and column is column.table._autoincrement_column
and isinstance(column.default, sa_schema.Sequence)
and column.default.optional
):
return None
return super(MSExecutionContext, self).get_insert_default(column)
class MSSQLCompiler(compiler.SQLCompiler):
returning_precedes_values = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
"doy": "dayofyear",
"dow": "weekday",
"milliseconds": "millisecond",
"microseconds": "microsecond",
},
)
def __init__(self, *args, **kwargs):
self.tablealiases = {}
super(MSSQLCompiler, self).__init__(*args, **kwargs)
def _with_legacy_schema_aliasing(fn):
def decorate(self, *arg, **kw):
if self.dialect.legacy_schema_aliasing:
return fn(self, *arg, **kw)
else:
super_ = getattr(super(MSSQLCompiler, self), fn.__name__)
return super_(*arg, **kw)
return decorate
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_current_date_func(self, fn, **kw):
return "GETDATE()"
def visit_length_func(self, fn, **kw):
return "LEN%s" % self.function_argspec(fn, **kw)
def visit_char_length_func(self, fn, **kw):
return "LEN%s" % self.function_argspec(fn, **kw)
def visit_concat_op_binary(self, binary, operator, **kw):
return "%s + %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_true(self, expr, **kw):
return "1"
def visit_false(self, expr, **kw):
return "0"
def visit_match_op_binary(self, binary, operator, **kw):
return "CONTAINS (%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def get_select_precolumns(self, select, **kw):
"""MS-SQL puts TOP, it's version of LIMIT here"""
s = super(MSSQLCompiler, self).get_select_precolumns(select, **kw)
if select._has_row_limiting_clause and self._use_top(select):
# ODBC drivers and possibly others
# don't support bind params in the SELECT clause on SQL Server.
# so have to use literal here.
kw["literal_execute"] = True
s += "TOP %s " % self.process(
self._get_limit_or_fetch(select), **kw
)
if select._fetch_clause is not None:
if select._fetch_clause_options["percent"]:
s += "PERCENT "
if select._fetch_clause_options["with_ties"]:
s += "WITH TIES "
return s
def get_from_hint_text(self, table, text):
return text
def get_crud_hint_text(self, table, text):
return text
def _get_limit_or_fetch(self, select):
if select._fetch_clause is None:
return select._limit_clause
else:
return select._fetch_clause
def _use_top(self, select):
return (select._offset_clause is None) and (
select._simple_int_clause(select._limit_clause)
or (
                # limit can use TOP when it is by itself. fetch only uses TOP
# when it needs to because of PERCENT and/or WITH TIES
select._simple_int_clause(select._fetch_clause)
and (
select._fetch_clause_options["percent"]
or select._fetch_clause_options["with_ties"]
)
)
)
def fetch_clause(self, cs, **kwargs):
return ""
def limit_clause(self, cs, **kwargs):
return ""
def _check_can_use_fetch_limit(self, select):
# to use ROW_NUMBER(), an ORDER BY is required.
        # OFFSET and FETCH are options of the ORDER BY clause
if not select._order_by_clause.clauses:
raise exc.CompileError(
"MSSQL requires an order_by when "
"using an OFFSET or a non-simple "
"LIMIT clause"
)
if select._fetch_clause_options is not None and (
select._fetch_clause_options["percent"]
or select._fetch_clause_options["with_ties"]
):
raise exc.CompileError(
"MSSQL needs TOP to use PERCENT and/or WITH TIES. "
"Only simple fetch without offset can be used."
)
def _row_limit_clause(self, select, **kw):
"""MSSQL 2012 supports OFFSET/FETCH operators
Use it instead subquery with row_number
"""
if self.dialect._supports_offset_fetch and not self._use_top(select):
self._check_can_use_fetch_limit(select)
text = ""
if select._offset_clause is not None:
offset_str = self.process(select._offset_clause, **kw)
else:
offset_str = "0"
text += "\n OFFSET %s ROWS" % offset_str
limit = self._get_limit_or_fetch(select)
if limit is not None:
text += "\n FETCH FIRST %s ROWS ONLY" % self.process(
limit, **kw
)
return text
else:
return ""
def visit_try_cast(self, element, **kw):
return "TRY_CAST (%s AS %s)" % (
self.process(element.clause, **kw),
self.process(element.typeclause, **kw),
)
def translate_select_structure(self, select_stmt, **kwargs):
"""Look for ``LIMIT`` and OFFSET in a select statement, and if
so tries to wrap it in a subquery with ``row_number()`` criterion.
MSSQL 2012 and above are excluded
"""
select = select_stmt
if (
select._has_row_limiting_clause
and not self.dialect._supports_offset_fetch
and not self._use_top(select)
and not getattr(select, "_mssql_visit", None)
):
self._check_can_use_fetch_limit(select)
_order_by_clauses = [
sql_util.unwrap_label_reference(elem)
for elem in select._order_by_clause.clauses
]
limit_clause = self._get_limit_or_fetch(select)
offset_clause = select._offset_clause
select = select._generate()
select._mssql_visit = True
select = (
select.add_columns(
sql.func.ROW_NUMBER()
.over(order_by=_order_by_clauses)
.label("mssql_rn")
)
.order_by(None)
.alias()
)
mssql_rn = sql.column("mssql_rn")
limitselect = sql.select(
*[c for c in select.c if c.key != "mssql_rn"]
)
if offset_clause is not None:
limitselect = limitselect.where(mssql_rn > offset_clause)
if limit_clause is not None:
limitselect = limitselect.where(
mssql_rn <= (limit_clause + offset_clause)
)
else:
limitselect = limitselect.where(mssql_rn <= (limit_clause))
return limitselect
else:
return select
@_with_legacy_schema_aliasing
def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs):
if mssql_aliased is table or iscrud:
return super(MSSQLCompiler, self).visit_table(table, **kwargs)
# alias schema-qualified tables
alias = self._schema_aliased_table(table)
if alias is not None:
return self.process(alias, mssql_aliased=table, **kwargs)
else:
return super(MSSQLCompiler, self).visit_table(table, **kwargs)
@_with_legacy_schema_aliasing
def visit_alias(self, alias, **kw):
# translate for schema-qualified table aliases
kw["mssql_aliased"] = alias.element
return super(MSSQLCompiler, self).visit_alias(alias, **kw)
@_with_legacy_schema_aliasing
def visit_column(self, column, add_to_result_map=None, **kw):
if (
column.table is not None
and (not self.isupdate and not self.isdelete)
or self.is_subquery()
):
# translate for schema-qualified table aliases
t = self._schema_aliased_table(column.table)
if t is not None:
converted = elements._corresponding_column_or_error(t, column)
if add_to_result_map is not None:
add_to_result_map(
column.name,
column.name,
(column, column.name, column.key),
column.type,
)
return super(MSSQLCompiler, self).visit_column(converted, **kw)
return super(MSSQLCompiler, self).visit_column(
column, add_to_result_map=add_to_result_map, **kw
)
def _schema_aliased_table(self, table):
if getattr(table, "schema", None) is not None:
if table not in self.tablealiases:
self.tablealiases[table] = table.alias()
return self.tablealiases[table]
else:
return None
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return "DATEPART(%s, %s)" % (field, self.process(extract.expr, **kw))
def visit_savepoint(self, savepoint_stmt):
return "SAVE TRANSACTION %s" % self.preparer.format_savepoint(
savepoint_stmt
)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return "ROLLBACK TRANSACTION %s" % self.preparer.format_savepoint(
savepoint_stmt
)
def visit_binary(self, binary, **kwargs):
"""Move bind parameters to the right-hand side of an operator, where
possible.
"""
if (
isinstance(binary.left, expression.BindParameter)
and binary.operator == operator.eq
and not isinstance(binary.right, expression.BindParameter)
):
return self.process(
expression.BinaryExpression(
binary.right, binary.left, binary.operator
),
**kwargs
)
return super(MSSQLCompiler, self).visit_binary(binary, **kwargs)
def returning_clause(self, stmt, returning_cols):
# SQL server returning clause requires that the columns refer to
# the virtual table names "inserted" or "deleted". Here, we make
# a simple alias of our table with that name, and then adapt the
# columns we have from the list of RETURNING columns to that new name
# so that they render as "inserted.<colname>" / "deleted.<colname>".
if self.isinsert or self.isupdate:
target = stmt.table.alias("inserted")
else:
target = stmt.table.alias("deleted")
adapter = sql_util.ClauseAdapter(target)
# adapter.traverse() takes a column from our target table and returns
# the one that is linked to the "inserted" / "deleted" tables. So in
# order to retrieve these values back from the result (e.g. like
# row[column]), tell the compiler to also add the original unadapted
# column to the result map. Before #4877, these were (unknowingly)
# falling back using string name matching in the result set which
# necessarily used an expensive KeyError in order to match.
columns = [
self._label_returning_column(
stmt,
adapter.traverse(c),
{"result_map_targets": (c,)},
)
for c in expression._select_iterables(returning_cols)
]
return "OUTPUT " + ", ".join(columns)
def get_cte_preamble(self, recursive):
# SQL Server finds it too inconvenient to accept
# an entirely optional, SQL standard specified,
# "RECURSIVE" word with their "WITH",
# so here we go
return "WITH"
def label_select_column(self, select, column, asfrom):
if isinstance(column, expression.Function):
return column.label(None)
else:
return super(MSSQLCompiler, self).label_select_column(
select, column, asfrom
)
def for_update_clause(self, select, **kw):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
# SQLAlchemy doesn't use
return ""
def order_by_clause(self, select, **kw):
# MSSQL only allows ORDER BY in subqueries if there is a LIMIT
if (
self.is_subquery()
and not select._limit
and (
select._offset is None
or not self.dialect._supports_offset_fetch
)
):
# avoid processing the order by clause if we won't end up
# using it, because we don't want all the bind params tacked
# onto the positional list if that is what the dbapi requires
return ""
order_by = self.process(select._order_by_clause, **kw)
if order_by:
return " ORDER BY " + order_by
else:
return ""
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
"""Render the UPDATE..FROM clause specific to MSSQL.
In MSSQL, if the UPDATE statement involves an alias of the table to
be updated, then the table itself must be added to the FROM list as
well. Otherwise, it is optional. Here, we add it regardless.
"""
return "FROM " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in [from_table] + extra_froms
)
def delete_table_clause(self, delete_stmt, from_table, extra_froms):
"""If we have extra froms make sure we render any alias as hint."""
ashint = False
if extra_froms:
ashint = True
return from_table._compiler_dispatch(
self, asfrom=True, iscrud=True, ashint=ashint
)
def delete_extra_from_clause(
self, delete_stmt, from_table, extra_froms, from_hints, **kw
):
"""Render the DELETE .. FROM clause specific to MSSQL.
Yes, it has the FROM keyword twice.
"""
return "FROM " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in [from_table] + extra_froms
)
def visit_empty_set_expr(self, type_):
return "SELECT 1 WHERE 1!=1"
def visit_is_distinct_from_binary(self, binary, operator, **kw):
return "NOT EXISTS (SELECT %s INTERSECT SELECT %s)" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_is_not_distinct_from_binary(self, binary, operator, **kw):
return "EXISTS (SELECT %s INTERSECT SELECT %s)" % (
self.process(binary.left),
self.process(binary.right),
)
def _render_json_extract_from_binary(self, binary, operator, **kw):
# note we are intentionally calling upon the process() calls in the
# order in which they appear in the SQL String as this is used
# by positional parameter rendering
if binary.type._type_affinity is sqltypes.JSON:
return "JSON_QUERY(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
# as with other dialects, start with an explicit test for NULL
case_expression = "CASE JSON_VALUE(%s, %s) WHEN NULL THEN NULL" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
if binary.type._type_affinity is sqltypes.Integer:
type_expression = "ELSE CAST(JSON_VALUE(%s, %s) AS INTEGER)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
elif binary.type._type_affinity is sqltypes.Numeric:
type_expression = "ELSE CAST(JSON_VALUE(%s, %s) AS %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
"FLOAT"
if isinstance(binary.type, sqltypes.Float)
else "NUMERIC(%s, %s)"
% (binary.type.precision, binary.type.scale),
)
elif binary.type._type_affinity is sqltypes.Boolean:
# the NULL handling is particularly weird with boolean, so
# explicitly return numeric (BIT) constants
type_expression = (
"WHEN 'true' THEN 1 WHEN 'false' THEN 0 ELSE NULL"
)
elif binary.type._type_affinity is sqltypes.String:
# TODO: does this comment (from mysql) apply to here, too?
# this fails with a JSON value that's a four byte unicode
# string. SQLite has the same problem at the moment
type_expression = "ELSE JSON_VALUE(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
else:
# other affinity....this is not expected right now
type_expression = "ELSE JSON_QUERY(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
return case_expression + " " + type_expression + " END"
def visit_json_getitem_op_binary(self, binary, operator, **kw):
return self._render_json_extract_from_binary(binary, operator, **kw)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
return self._render_json_extract_from_binary(binary, operator, **kw)
def visit_sequence(self, seq, **kw):
return "NEXT VALUE FOR %s" % self.preparer.format_sequence(seq)
class MSSQLStrictCompiler(MSSQLCompiler):
"""A subclass of MSSQLCompiler which disables the usage of bind
parameters where not allowed natively by MS-SQL.
A dialect may use this compiler on a platform where native
binds are used.
"""
ansi_bind_rules = True
def visit_in_op_binary(self, binary, operator, **kw):
kw["literal_execute"] = True
return "%s IN %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_not_in_op_binary(self, binary, operator, **kw):
kw["literal_execute"] = True
return "%s NOT IN %s" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def render_literal_value(self, value, type_):
"""
For date and datetime values, convert to a string
format acceptable to MSSQL. That seems to be the
so-called ODBC canonical date format which looks
like this:
yyyy-mm-dd hh:mi:ss.mmm(24h)
For other data types, call the base class implementation.
"""
# datetime and date are both subclasses of datetime.date
if issubclass(type(value), datetime.date):
# SQL Server wants single quotes around the date string.
return "'" + str(value) + "'"
else:
return super(MSSQLStrictCompiler, self).render_literal_value(
value, type_
)
class MSDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column)
# type is not accepted in a computed column
if column.computed is not None:
colspec += " " + self.process(column.computed)
else:
colspec += " " + self.dialect.type_compiler.process(
column.type, type_expression=column
)
if column.nullable is not None:
if (
not column.nullable
or column.primary_key
or isinstance(column.default, sa_schema.Sequence)
or column.autoincrement is True
or column.identity
):
colspec += " NOT NULL"
elif column.computed is None:
# don't specify "NULL" for computed columns
colspec += " NULL"
if column.table is None:
raise exc.CompileError(
"mssql requires Table-bound columns "
"in order to generate DDL"
)
d_opt = column.dialect_options["mssql"]
start = d_opt["identity_start"]
increment = d_opt["identity_increment"]
if start is not None or increment is not None:
if column.identity:
raise exc.CompileError(
"Cannot specify options 'mssql_identity_start' and/or "
"'mssql_identity_increment' while also using the "
"'Identity' construct."
)
util.warn_deprecated(
"The dialect options 'mssql_identity_start' and "
"'mssql_identity_increment' are deprecated. "
"Use the 'Identity' object instead.",
"1.4",
)
if column.identity:
colspec += self.process(column.identity, **kwargs)
elif (
column is column.table._autoincrement_column
or column.autoincrement is True
) and (
not isinstance(column.default, Sequence) or column.default.optional
):
colspec += self.process(Identity(start=start, increment=increment))
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
return colspec
def visit_create_index(self, create, include_schema=False):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
# handle clustering option
clustered = index.dialect_options["mssql"]["clustered"]
if clustered is not None:
if clustered:
text += "CLUSTERED "
else:
text += "NONCLUSTERED "
text += "INDEX %s ON %s (%s)" % (
self._prepared_index_name(index, include_schema=include_schema),
preparer.format_table(index.table),
", ".join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
),
)
# handle other included columns
if index.dialect_options["mssql"]["include"]:
inclusions = [
index.table.c[col]
if isinstance(col, util.string_types)
else col
for col in index.dialect_options["mssql"]["include"]
]
text += " INCLUDE (%s)" % ", ".join(
[preparer.quote(c.name) for c in inclusions]
)
whereclause = index.dialect_options["mssql"]["where"]
if whereclause is not None:
whereclause = coercions.expect(
roles.DDLExpressionRole, whereclause
)
where_compiled = self.sql_compiler.process(
whereclause, include_table=False, literal_binds=True
)
text += " WHERE " + where_compiled
return text
def visit_drop_index(self, drop):
return "\nDROP INDEX %s ON %s" % (
self._prepared_index_name(drop.element, include_schema=False),
self.preparer.format_table(drop.element.table),
)
def visit_primary_key_constraint(self, constraint):
if len(constraint) == 0:
return ""
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % self.preparer.format_constraint(
constraint
)
text += "PRIMARY KEY "
clustered = constraint.dialect_options["mssql"]["clustered"]
if clustered is not None:
if clustered:
text += "CLUSTERED "
else:
text += "NONCLUSTERED "
text += "(%s)" % ", ".join(
self.preparer.quote(c.name) for c in constraint
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_unique_constraint(self, constraint):
if len(constraint) == 0:
return ""
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "UNIQUE "
clustered = constraint.dialect_options["mssql"]["clustered"]
if clustered is not None:
if clustered:
text += "CLUSTERED "
else:
text += "NONCLUSTERED "
text += "(%s)" % ", ".join(
self.preparer.quote(c.name) for c in constraint
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_computed_column(self, generated):
text = "AS (%s)" % self.sql_compiler.process(
generated.sqltext, include_table=False, literal_binds=True
)
# explicitly check for True|False since None means server default
if generated.persisted is True:
text += " PERSISTED"
return text
def visit_create_sequence(self, create, **kw):
prefix = None
if create.element.data_type is not None:
data_type = create.element.data_type
prefix = " AS %s" % self.type_compiler.process(data_type)
return super(MSDDLCompiler, self).visit_create_sequence(
create, prefix=prefix, **kw
)
def visit_identity_column(self, identity, **kw):
text = " IDENTITY"
if identity.start is not None or identity.increment is not None:
start = 1 if identity.start is None else identity.start
increment = 1 if identity.increment is None else identity.increment
text += "(%s,%s)" % (start, increment)
return text
class MSIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def __init__(self, dialect):
super(MSIdentifierPreparer, self).__init__(
dialect,
initial_quote="[",
final_quote="]",
quote_case_sensitive_collations=False,
)
def _escape_identifier(self, value):
return value.replace("]", "]]")
def _unescape_identifier(self, value):
return value.replace("]]", "]")
def quote_schema(self, schema, force=None):
"""Prepare a quoted table and schema name."""
# need to re-implement the deprecation warning entirely
if force is not None:
# not using the util.deprecated_params() decorator in this
# case because of the additional function call overhead on this
# very performance-critical spot.
util.warn_deprecated(
"The IdentifierPreparer.quote_schema.force parameter is "
"deprecated and will be removed in a future release. This "
"flag has no effect on the behavior of the "
"IdentifierPreparer.quote method; please refer to "
"quoted_name().",
version="1.3",
)
dbname, owner = _schema_elements(schema)
if dbname:
result = "%s.%s" % (self.quote(dbname), self.quote(owner))
elif owner:
result = self.quote(owner)
else:
result = ""
return result
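# The two decorators below wrap reflection methods that accept a ``schema``
# keyword: they split the schema into separate (dbname, owner) tokens and
# invoke the wrapped function with the connection switched to that database
# when one is present.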
def _db_plus_owner_listing(fn):
def wrap(dialect, connection, schema=None, **kw):
dbname, owner = _owner_plus_db(dialect, schema)
return _switch_db(
dbname,
connection,
fn,
dialect,
connection,
dbname,
owner,
schema,
**kw
)
return update_wrapper(wrap, fn)
def _db_plus_owner(fn):
def wrap(dialect, connection, tablename, schema=None, **kw):
dbname, owner = _owner_plus_db(dialect, schema)
return _switch_db(
dbname,
connection,
fn,
dialect,
connection,
tablename,
dbname,
owner,
schema,
**kw
)
return update_wrapper(wrap, fn)
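# Run ``fn`` with the connection temporarily switched to ``dbname`` (when
# given) using "USE", restoring the previously current database afterwards.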
def _switch_db(dbname, connection, fn, *arg, **kw):
if dbname:
current_db = connection.exec_driver_sql("select db_name()").scalar()
if current_db != dbname:
connection.exec_driver_sql(
"use %s" % connection.dialect.identifier_preparer.quote(dbname)
)
try:
return fn(*arg, **kw)
finally:
if dbname and current_db != dbname:
connection.exec_driver_sql(
"use %s"
% connection.dialect.identifier_preparer.quote(current_db)
)
def _owner_plus_db(dialect, schema):
if not schema:
return None, dialect.default_schema_name
elif "." in schema:
return _schema_elements(schema)
else:
return None, schema
_memoized_schema = util.LRUCache()
def _schema_elements(schema):
if isinstance(schema, quoted_name) and schema.quote:
return None, schema
if schema in _memoized_schema:
return _memoized_schema[schema]
# tests for this function are in:
# test/dialect/mssql/test_reflection.py ->
# OwnerPlusDBTest.test_owner_database_pairs
# test/dialect/mssql/test_compiler.py -> test_force_schema_*
# test/dialect/mssql/test_compiler.py -> test_schema_many_tokens_*
#
push = []
symbol = ""
bracket = False
has_brackets = False
for token in re.split(r"(\[|\]|\.)", schema):
if not token:
continue
if token == "[":
bracket = True
has_brackets = True
elif token == "]":
bracket = False
elif not bracket and token == ".":
if has_brackets:
push.append("[%s]" % symbol)
else:
push.append(symbol)
symbol = ""
has_brackets = False
else:
symbol += token
if symbol:
push.append(symbol)
if len(push) > 1:
dbname, owner = ".".join(push[0:-1]), push[-1]
# test for internal brackets
if re.match(r".*\].*\[.*", dbname[1:-1]):
dbname = quoted_name(dbname, quote=False)
else:
dbname = dbname.lstrip("[").rstrip("]")
elif len(push):
dbname, owner = None, push[0]
else:
dbname, owner = None, None
_memoized_schema[schema] = dbname, owner
return dbname, owner
class MSDialect(default.DefaultDialect):
# will assume it's at least mssql2005
name = "mssql"
supports_statement_cache = True
supports_default_values = True
supports_empty_insert = False
execution_ctx_cls = MSExecutionContext
use_scope_identity = True
max_identifier_length = 128
schema_name = "dbo"
implicit_returning = True
full_returning = True
colspecs = {
sqltypes.DateTime: _MSDateTime,
sqltypes.Date: _MSDate,
sqltypes.JSON: JSON,
sqltypes.JSON.JSONIndexType: JSONIndexType,
sqltypes.JSON.JSONPathType: JSONPathType,
sqltypes.Time: TIME,
sqltypes.Unicode: _MSUnicode,
sqltypes.UnicodeText: _MSUnicodeText,
}
engine_config_types = default.DefaultDialect.engine_config_types.union(
{"legacy_schema_aliasing": util.asbool}
)
ischema_names = ischema_names
supports_sequences = True
sequences_optional = True
# T-SQL's actual default is -9223372036854775808
default_sequence_base = 1
supports_native_boolean = False
non_native_boolean_check_constraint = False
supports_unicode_binds = True
postfetch_lastrowid = True
_supports_offset_fetch = False
_supports_nvarchar_max = False
legacy_schema_aliasing = False
server_version_info = ()
statement_compiler = MSSQLCompiler
ddl_compiler = MSDDLCompiler
type_compiler = MSTypeCompiler
preparer = MSIdentifierPreparer
construct_arguments = [
(sa_schema.PrimaryKeyConstraint, {"clustered": None}),
(sa_schema.UniqueConstraint, {"clustered": None}),
(sa_schema.Index, {"clustered": None, "include": None, "where": None}),
(
sa_schema.Column,
{"identity_start": None, "identity_increment": None},
),
]
def __init__(
self,
query_timeout=None,
use_scope_identity=True,
schema_name="dbo",
isolation_level=None,
deprecate_large_types=None,
json_serializer=None,
json_deserializer=None,
legacy_schema_aliasing=None,
**opts
):
self.query_timeout = int(query_timeout or 0)
self.schema_name = schema_name
self.use_scope_identity = use_scope_identity
self.deprecate_large_types = deprecate_large_types
if legacy_schema_aliasing is not None:
util.warn_deprecated(
"The legacy_schema_aliasing parameter is "
"deprecated and will be removed in a future release.",
"1.4",
)
self.legacy_schema_aliasing = legacy_schema_aliasing
super(MSDialect, self).__init__(**opts)
self.isolation_level = isolation_level
self._json_serializer = json_serializer
self._json_deserializer = json_deserializer
def do_savepoint(self, connection, name):
# give the DBAPI a push
connection.exec_driver_sql("IF @@TRANCOUNT = 0 BEGIN TRANSACTION")
super(MSDialect, self).do_savepoint(connection, name)
def do_release_savepoint(self, connection, name):
# SQL Server does not support RELEASE SAVEPOINT
pass
_isolation_lookup = set(
[
"SERIALIZABLE",
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
"SNAPSHOT",
]
)
def set_isolation_level(self, connection, level):
level = level.replace("_", " ")
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute("SET TRANSACTION ISOLATION LEVEL %s" % level)
cursor.close()
if level == "SNAPSHOT":
connection.commit()
def get_isolation_level(self, connection):
last_error = None
views = ("sys.dm_exec_sessions", "sys.dm_pdw_nodes_exec_sessions")
for view in views:
cursor = connection.cursor()
try:
cursor.execute(
"""
SELECT CASE transaction_isolation_level
WHEN 0 THEN NULL
WHEN 1 THEN 'READ UNCOMMITTED'
WHEN 2 THEN 'READ COMMITTED'
WHEN 3 THEN 'REPEATABLE READ'
WHEN 4 THEN 'SERIALIZABLE'
WHEN 5 THEN 'SNAPSHOT' END AS TRANSACTION_ISOLATION_LEVEL
FROM %s
where session_id = @@SPID
"""
% view
)
val = cursor.fetchone()[0]
except self.dbapi.Error as err:
# Python3 scoping rules
last_error = err
continue
else:
return val.upper()
finally:
cursor.close()
else:
# note that the NotImplementedError is caught by
# DefaultDialect, so the warning here is all that displays
util.warn(
"Could not fetch transaction isolation level, "
"tried views: %s; final error was: %s" % (views, last_error)
)
raise NotImplementedError(
"Can't fetch isolation level on this particular "
"SQL Server version. tried views: %s; final error was: %s"
% (views, last_error)
)
def initialize(self, connection):
super(MSDialect, self).initialize(connection)
self._setup_version_attributes()
self._setup_supports_nvarchar_max(connection)
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
def _setup_version_attributes(self):
if self.server_version_info[0] not in list(range(8, 17)):
util.warn(
"Unrecognized server version info '%s'. Some SQL Server "
"features may not function properly."
% ".".join(str(x) for x in self.server_version_info)
)
if self.server_version_info >= MS_2008_VERSION:
self.supports_multivalues_insert = True
if self.deprecate_large_types is None:
self.deprecate_large_types = (
self.server_version_info >= MS_2012_VERSION
)
self._supports_offset_fetch = (
self.server_version_info and self.server_version_info[0] >= 11
)
def _setup_supports_nvarchar_max(self, connection):
try:
connection.scalar(
sql.text("SELECT CAST('test max support' AS NVARCHAR(max))")
)
except exc.DBAPIError:
self._supports_nvarchar_max = False
else:
self._supports_nvarchar_max = True
def _get_default_schema_name(self, connection):
query = sql.text("SELECT schema_name()")
default_schema_name = connection.scalar(query)
if default_schema_name is not None:
# guard against the case where the default_schema_name is being
# fed back into a table reflection function.
return quoted_name(default_schema_name, quote=True)
else:
return self.schema_name
@_db_plus_owner
def has_table(self, connection, tablename, dbname, owner, schema):
self._ensure_has_table_connection(connection)
if tablename.startswith("#"): # temporary table
tables = ischema.mssql_temp_table_columns
s = sql.select(tables.c.table_name).where(
tables.c.table_name.like(
self._temp_table_name_like_pattern(tablename)
)
)
table_name = connection.execute(s.limit(1)).scalar()
if table_name:
# #6910: verify it's not a temp table from another session
obj_id = connection.execute(
text("SELECT object_id(:table_name)"),
{"table_name": "tempdb.dbo.[{}]".format(table_name)},
).scalar()
return bool(obj_id)
else:
return False
else:
tables = ischema.tables
s = sql.select(tables.c.table_name).where(
sql.and_(
tables.c.table_type == "BASE TABLE",
tables.c.table_name == tablename,
)
)
if owner:
s = s.where(tables.c.table_schema == owner)
c = connection.execute(s)
return c.first() is not None
@_db_plus_owner
def has_sequence(self, connection, sequencename, dbname, owner, schema):
sequences = ischema.sequences
s = sql.select(sequences.c.sequence_name).where(
sequences.c.sequence_name == sequencename
)
if owner:
s = s.where(sequences.c.sequence_schema == owner)
c = connection.execute(s)
return c.first() is not None
@reflection.cache
@_db_plus_owner_listing
def get_sequence_names(self, connection, dbname, owner, schema, **kw):
sequences = ischema.sequences
s = sql.select(sequences.c.sequence_name)
if owner:
s = s.where(sequences.c.sequence_schema == owner)
c = connection.execute(s)
return [row[0] for row in c]
@reflection.cache
def get_schema_names(self, connection, **kw):
s = sql.select(ischema.schemata.c.schema_name).order_by(
ischema.schemata.c.schema_name
)
schema_names = [r[0] for r in connection.execute(s)]
return schema_names
@reflection.cache
@_db_plus_owner_listing
def get_table_names(self, connection, dbname, owner, schema, **kw):
tables = ischema.tables
s = (
sql.select(tables.c.table_name)
.where(
sql.and_(
tables.c.table_schema == owner,
tables.c.table_type == "BASE TABLE",
)
)
.order_by(tables.c.table_name)
)
table_names = [r[0] for r in connection.execute(s)]
return table_names
@reflection.cache
@_db_plus_owner_listing
def get_view_names(self, connection, dbname, owner, schema, **kw):
tables = ischema.tables
s = (
sql.select(tables.c.table_name)
.where(
sql.and_(
tables.c.table_schema == owner,
tables.c.table_type == "VIEW",
)
)
.order_by(tables.c.table_name)
)
view_names = [r[0] for r in connection.execute(s)]
return view_names
@reflection.cache
@_db_plus_owner
def get_indexes(self, connection, tablename, dbname, owner, schema, **kw):
filter_definition = (
"ind.filter_definition"
if self.server_version_info >= MS_2008_VERSION
else "NULL as filter_definition"
)
rp = connection.execution_options(future_result=True).execute(
sql.text(
"select ind.index_id, ind.is_unique, ind.name, "
"%s "
"from sys.indexes as ind join sys.tables as tab on "
"ind.object_id=tab.object_id "
"join sys.schemas as sch on sch.schema_id=tab.schema_id "
"where tab.name = :tabname "
"and sch.name=:schname "
"and ind.is_primary_key=0 and ind.type != 0"
% filter_definition
)
.bindparams(
sql.bindparam("tabname", tablename, ischema.CoerceUnicode()),
sql.bindparam("schname", owner, ischema.CoerceUnicode()),
)
.columns(name=sqltypes.Unicode())
)
indexes = {}
for row in rp.mappings():
indexes[row["index_id"]] = {
"name": row["name"],
"unique": row["is_unique"] == 1,
"column_names": [],
"include_columns": [],
}
if row["filter_definition"] is not None:
indexes[row["index_id"]].setdefault("dialect_options", {})[
"mssql_where"
] = row["filter_definition"]
rp = connection.execution_options(future_result=True).execute(
sql.text(
"select ind_col.index_id, ind_col.object_id, col.name, "
"ind_col.is_included_column "
"from sys.columns as col "
"join sys.tables as tab on tab.object_id=col.object_id "
"join sys.index_columns as ind_col on "
"(ind_col.column_id=col.column_id and "
"ind_col.object_id=tab.object_id) "
"join sys.schemas as sch on sch.schema_id=tab.schema_id "
"where tab.name=:tabname "
"and sch.name=:schname"
)
.bindparams(
sql.bindparam("tabname", tablename, ischema.CoerceUnicode()),
sql.bindparam("schname", owner, ischema.CoerceUnicode()),
)
.columns(name=sqltypes.Unicode())
)
for row in rp.mappings():
if row["index_id"] in indexes:
if row["is_included_column"]:
indexes[row["index_id"]]["include_columns"].append(
row["name"]
)
else:
indexes[row["index_id"]]["column_names"].append(
row["name"]
)
return list(indexes.values())
@reflection.cache
@_db_plus_owner
def get_view_definition(
self, connection, viewname, dbname, owner, schema, **kw
):
rp = connection.execute(
sql.text(
"select definition from sys.sql_modules as mod, "
"sys.views as views, "
"sys.schemas as sch"
" where "
"mod.object_id=views.object_id and "
"views.schema_id=sch.schema_id and "
"views.name=:viewname and sch.name=:schname"
).bindparams(
sql.bindparam("viewname", viewname, ischema.CoerceUnicode()),
sql.bindparam("schname", owner, ischema.CoerceUnicode()),
)
)
if rp:
view_def = rp.scalar()
return view_def
def _temp_table_name_like_pattern(self, tablename):
# LIKE uses '%' to match zero or more characters and '_' to match any
# single character. We want to match literal underscores, so T-SQL
# requires that we enclose them in square brackets.
return tablename + (
("[_][_][_]%") if not tablename.startswith("##") else ""
)
def _get_internal_temp_table_name(self, connection, tablename):
# it's likely that schema is always "dbo", but since we can
# get it here, let's get it.
# see https://stackoverflow.com/questions/8311959/
# specifying-schema-for-temporary-tables
try:
return connection.execute(
sql.text(
"select table_schema, table_name "
"from tempdb.information_schema.tables "
"where table_name like :p1"
),
{"p1": self._temp_table_name_like_pattern(tablename)},
).one()
except exc.MultipleResultsFound as me:
util.raise_(
exc.UnreflectableTableError(
"Found more than one temporary table named '%s' in tempdb "
"at this time. Cannot reliably resolve that name to its "
"internal table name." % tablename
),
replace_context=me,
)
except exc.NoResultFound as ne:
util.raise_(
exc.NoSuchTableError(
"Unable to find a temporary table named '%s' in tempdb."
% tablename
),
replace_context=ne,
)
@reflection.cache
@_db_plus_owner
def get_columns(self, connection, tablename, dbname, owner, schema, **kw):
is_temp_table = tablename.startswith("#")
if is_temp_table:
owner, tablename = self._get_internal_temp_table_name(
connection, tablename
)
columns = ischema.mssql_temp_table_columns
else:
columns = ischema.columns
computed_cols = ischema.computed_columns
identity_cols = ischema.identity_columns
if owner:
whereclause = sql.and_(
columns.c.table_name == tablename,
columns.c.table_schema == owner,
)
full_name = columns.c.table_schema + "." + columns.c.table_name
else:
whereclause = columns.c.table_name == tablename
full_name = columns.c.table_name
join = columns.join(
computed_cols,
onclause=sql.and_(
computed_cols.c.object_id == func.object_id(full_name),
computed_cols.c.name == columns.c.column_name,
),
isouter=True,
).join(
identity_cols,
onclause=sql.and_(
identity_cols.c.object_id == func.object_id(full_name),
identity_cols.c.name == columns.c.column_name,
),
isouter=True,
)
if self._supports_nvarchar_max:
computed_definition = computed_cols.c.definition
else:
# tds_version 4.2 does not support NVARCHAR(MAX)
computed_definition = sql.cast(
computed_cols.c.definition, NVARCHAR(4000)
)
s = (
sql.select(
columns,
computed_definition,
computed_cols.c.is_persisted,
identity_cols.c.is_identity,
identity_cols.c.seed_value,
identity_cols.c.increment_value,
)
.where(whereclause)
.select_from(join)
.order_by(columns.c.ordinal_position)
)
c = connection.execution_options(future_result=True).execute(s)
cols = []
for row in c.mappings():
name = row[columns.c.column_name]
type_ = row[columns.c.data_type]
nullable = row[columns.c.is_nullable] == "YES"
charlen = row[columns.c.character_maximum_length]
numericprec = row[columns.c.numeric_precision]
numericscale = row[columns.c.numeric_scale]
default = row[columns.c.column_default]
collation = row[columns.c.collation_name]
definition = row[computed_definition]
is_persisted = row[computed_cols.c.is_persisted]
is_identity = row[identity_cols.c.is_identity]
identity_start = row[identity_cols.c.seed_value]
identity_increment = row[identity_cols.c.increment_value]
coltype = self.ischema_names.get(type_, None)
kwargs = {}
if coltype in (
MSString,
MSChar,
MSNVarchar,
MSNChar,
MSText,
MSNText,
MSBinary,
MSVarBinary,
sqltypes.LargeBinary,
):
if charlen == -1:
charlen = None
kwargs["length"] = charlen
if collation:
kwargs["collation"] = collation
if coltype is None:
util.warn(
"Did not recognize type '%s' of column '%s'"
% (type_, name)
)
coltype = sqltypes.NULLTYPE
else:
if issubclass(coltype, sqltypes.Numeric):
kwargs["precision"] = numericprec
if not issubclass(coltype, sqltypes.Float):
kwargs["scale"] = numericscale
coltype = coltype(**kwargs)
cdict = {
"name": name,
"type": coltype,
"nullable": nullable,
"default": default,
"autoincrement": is_identity is not None,
}
if definition is not None and is_persisted is not None:
cdict["computed"] = {
"sqltext": definition,
"persisted": is_persisted,
}
if is_identity is not None:
# identity_start and identity_increment are Decimal or None
if identity_start is None or identity_increment is None:
cdict["identity"] = {}
else:
if isinstance(coltype, sqltypes.BigInteger):
start = compat.long_type(identity_start)
increment = compat.long_type(identity_increment)
elif isinstance(coltype, sqltypes.Integer):
start = int(identity_start)
increment = int(identity_increment)
else:
start = identity_start
increment = identity_increment
cdict["identity"] = {
"start": start,
"increment": increment,
}
cols.append(cdict)
return cols
@reflection.cache
@_db_plus_owner
def get_pk_constraint(
self, connection, tablename, dbname, owner, schema, **kw
):
pkeys = []
TC = ischema.constraints
C = ischema.key_constraints.alias("C")
# Primary key constraints
s = (
sql.select(
C.c.column_name, TC.c.constraint_type, C.c.constraint_name
)
.where(
sql.and_(
TC.c.constraint_name == C.c.constraint_name,
TC.c.table_schema == C.c.table_schema,
C.c.table_name == tablename,
C.c.table_schema == owner,
),
)
.order_by(TC.c.constraint_name, C.c.ordinal_position)
)
c = connection.execution_options(future_result=True).execute(s)
constraint_name = None
for row in c.mappings():
if "PRIMARY" in row[TC.c.constraint_type.name]:
pkeys.append(row["COLUMN_NAME"])
if constraint_name is None:
constraint_name = row[C.c.constraint_name.name]
return {"constrained_columns": pkeys, "name": constraint_name}
@reflection.cache
@_db_plus_owner
def get_foreign_keys(
self, connection, tablename, dbname, owner, schema, **kw
):
RR = ischema.ref_constraints
C = ischema.key_constraints.alias("C")
R = ischema.key_constraints.alias("R")
# Foreign key constraints
s = (
sql.select(
C.c.column_name,
R.c.table_schema,
R.c.table_name,
R.c.column_name,
RR.c.constraint_name,
RR.c.match_option,
RR.c.update_rule,
RR.c.delete_rule,
)
.where(
sql.and_(
C.c.table_name == tablename,
C.c.table_schema == owner,
RR.c.constraint_schema == C.c.table_schema,
C.c.constraint_name == RR.c.constraint_name,
R.c.constraint_name == RR.c.unique_constraint_name,
R.c.constraint_schema == RR.c.unique_constraint_schema,
C.c.ordinal_position == R.c.ordinal_position,
)
)
.order_by(RR.c.constraint_name, R.c.ordinal_position)
)
# group rows by constraint ID, to handle multi-column FKs
fkeys = []
def fkey_rec():
return {
"name": None,
"constrained_columns": [],
"referred_schema": None,
"referred_table": None,
"referred_columns": [],
}
fkeys = util.defaultdict(fkey_rec)
for r in connection.execute(s).fetchall():
scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r
rec = fkeys[rfknm]
rec["name"] = rfknm
if not rec["referred_table"]:
rec["referred_table"] = rtbl
if schema is not None or owner != rschema:
if dbname:
rschema = dbname + "." + rschema
rec["referred_schema"] = rschema
local_cols, remote_cols = (
rec["constrained_columns"],
rec["referred_columns"],
)
local_cols.append(scol)
remote_cols.append(rcol)
return list(fkeys.values())
| 32.394451 | 113 | 0.597161 |
7946636a349651824531fa7e95ab244cfa4717a4 | 2,118 | py | Python | asco.py | rafly7/asco | 6d4e7207cc06b4b8601d2d8c14f8ddf5095699ee | [
"MIT"
] | null | null | null | asco.py | rafly7/asco | 6d4e7207cc06b4b8601d2d8c14f8ddf5095699ee | [
"MIT"
] | null | null | null | asco.py | rafly7/asco | 6d4e7207cc06b4b8601d2d8c14f8ddf5095699ee | [
"MIT"
] | null | null | null | #author : rafly dipoe.a
import argparse
import asyncio
import aiohttp
import sys
import time
#Scope variable
loop = asyncio.get_event_loop()
LG='\033[1;32m' #green
DT='\033[0m' # Default
R='\033[0;31m' #red
def _argument():
    parser = argparse.ArgumentParser(description="Checking xss url",formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("-u",metavar="URL",dest="url",help="url target")
    parser.add_argument("-o",metavar="OUTPUT",dest="output",help="output filename")
    parser.add_argument("-d",metavar="DICTIONARY",dest="dictionary",help="file dictionary (default payload.txt)",default="payload.txt")
    return parser
#Checking if url start with protocol https or http
def protocol(url):
    if url.startswith("https://") or url.startswith("http://"):
        return url
    else:
        if url.startswith("www"):
            return f"https://{url}"
        else:
            return f"http://{url}"
async def argument_req():
    global output
    arg = _argument().parse_args()
    # default to None so the script no longer raises NameError when -o is omitted
    output = arg.output if arg.output else None
    if arg.url:
        #start time async requests
        ts = time.time()
        session = aiohttp.ClientSession()
        #gather url and payload with list in nested for async
        for f in asyncio.as_completed([session.get(f"{arg.url}/{i}") for i in open(arg.dictionary,"r").readlines()]):
            try:
                resp = await f
                if resp.status == 200:
                    print(f"\r{LG}[+] May Vulnerable Xss {resp.status}{DT}",end="")
                    if output:
                        # context manager closes the file and avoids shadowing the loop variable "f"
                        with open(output,"a+") as outfile:
                            outfile.write(f"\n{resp.url}")
                else:
                    print(f"\r{R}[-] Not Vulnerable Xss {resp.status}{DT}",end="")
                #buffered screen response url
                sys.stdout.flush()
                await resp.release()
            except Exception:
                continue
        await session.close()
        print(f"\noutput : {output}\nFinished : {(time.time() - ts):.2f} seconds")
def main():
    loop = asyncio.get_event_loop()
    loop.run_until_complete(argument_req())
    loop.close()
def banner():
print("""
█████╗ ███████╗ ██████╗ ██████╗
██╔══██╗██╔════╝██╔════╝██╔═══██╗
███████║███████╗██║ ██║ ██║
██╔══██║╚════██║██║ ██║ ██║
██║ ██║███████║╚██████╗╚██████╔╝
╚═╝ ╚═╝╚══════╝ ╚═════╝ ╚═════╝
""")
if __name__ == "__main__":
banner()
main()
| 26.475 | 132 | 0.615675 |
79466384e0c7d08ce5d10b50403399908ef1a994 | 3,632 | py | Python | tools/studio/cvra_studio/network/ParameterTree.py | greck2908/robot-software | 2e1e8177148a089e8883967375dde7f8ed3d878b | [
"MIT"
] | 40 | 2016-10-04T19:59:22.000Z | 2020-12-25T18:11:35.000Z | tools/studio/cvra_studio/network/ParameterTree.py | greck2908/robot-software | 2e1e8177148a089e8883967375dde7f8ed3d878b | [
"MIT"
] | 209 | 2016-09-21T21:54:28.000Z | 2022-01-26T07:42:37.000Z | tools/studio/cvra_studio/network/ParameterTree.py | greck2908/robot-software | 2e1e8177148a089e8883967375dde7f8ed3d878b | [
"MIT"
] | 21 | 2016-11-07T14:40:16.000Z | 2021-11-02T09:53:37.000Z | from collections import namedtuple
import logging
import queue
import uavcan
_Parameter = namedtuple("Parameter", ["name", "value", "type"])
class Parameter(_Parameter):
    def __repr__(self):
        return str(self.value)
def value_to_parameter(value, name=""):
    if isinstance(value, bool):
        return Parameter(name, value, bool)
    elif isinstance(value, int):
        return Parameter(name, value, int)
    elif isinstance(value, float):
        return Parameter(name, value, float)
    elif isinstance(value, str):
        return Parameter(name, value, str)
    else:
        return None
def extract_value(value):
    """
    Given UAVCAN Value object, returns the value and its type
    """
    if hasattr(value, "boolean_value"):
        return (bool(value.boolean_value), bool)
    elif hasattr(value, "integer_value"):
        return (int(value.integer_value), int)
    elif hasattr(value, "real_value"):
        return (float(value.real_value), float)
    elif hasattr(value, "string_value"):
        return (str(value.string_value), str)
    else:
        return (None, None)
def value_to_uavcan(value, value_type):
    """
    Given a value and its type, returns a UAVCAN Value object
    """
    if value_type == bool:
        return uavcan.protocol.param.Value(boolean_value=bool(value))
    elif value_type == int:
        return uavcan.protocol.param.Value(integer_value=int(value))
    elif value_type == float:
        return uavcan.protocol.param.Value(real_value=float(value))
    elif value_type == str:
        return uavcan.protocol.param.Value(string_value=str(value))
    else:
        return uavcan.protocol.param.Value()
def parameter_to_yaml(dumper, data):
    """
    Given a YAML dumper and a Parameter data, returns a properly formatted value
    """
    if data.type == bool:
        return dumper.represent_scalar("tag:yaml.org,2002:bool", str(data))
    elif data.type == int:
        return dumper.represent_scalar("tag:yaml.org,2002:int", str(data))
    elif data.type == float:
        return dumper.represent_scalar("tag:yaml.org,2002:float", str(data))
    elif data.type == str:
        return dumper.represent_scalar("tag:yaml.org,2002:str", str(data))
    else:
        raise TypeError("Unsupported type {} for parameter".format(data.type))
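def _example_yaml_dump():
    """Editor's sketch (not part of the original module): parameter_to_yaml is
    meant to be registered as a PyYAML representer. Assumes the standard `yaml`
    package is installed; the parameter name below is purely illustrative."""
    import yaml
    yaml.add_representer(Parameter, parameter_to_yaml)
    return yaml.dump({"example.param": Parameter("example.param", 42, int)})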
class ParameterTree:
    """
    Iterator for accessing a UAVCAN node's parameters
    """
    def __init__(self, node, node_id):
        self.logger = logging.getLogger("ParameterTree")
        self.node = node
        self.node_id = node_id
        self.index = 0
        self.q = queue.Queue()
        self.done = False
    def _request_next(self):
        def callback(event):
            if event:
                value, value_type = extract_value(event.response.value)
                self.q.put(
                    Parameter(
                        name=str(event.response.name), value=value, type=value_type
                    )
                )
                if len(event.response.name) == 0:
                    self.done = True
            else:
                self.logger.warning("Service request has timed out!")
                self.q.put(None)
                self.done = True
        self.node.request(
            uavcan.protocol.param.GetSet.Request(index=self.index),
            self.node_id,
            callback,
        )
        self.index = self.index + 1
    def __iter__(self):
        return self
    def __next__(self):
        self._request_next()
        param = self.q.get(block=True)
        if self.done:
            raise StopIteration()
        else:
            return param
| 29.528455 | 83 | 0.60848 |
794663f00769e70b212db71032e84da4a1e7bfdf | 1,787 | py | Python | convert_tflite.py | elentail/Serving | 5aad0d310420bae31ab06972e4837b8309fda057 | [
"MIT"
] | null | null | null | convert_tflite.py | elentail/Serving | 5aad0d310420bae31ab06972e4837b8309fda057 | [
"MIT"
] | null | null | null | convert_tflite.py | elentail/Serving | 5aad0d310420bae31ab06972e4837b8309fda057 | [
"MIT"
] | null | null | null | import os
import numpy as np
import tensorflow as tf
# fixed folder
saved_model_dir = "tf_cnn_model/1/"
target_dir = "tflite_cnn_model"
def convert_tflite():
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
    #converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_LATENCY]
    tflite_model = converter.convert()
    with open(f"{target_dir}/tflite_model.tflite", "wb") as f:
        f.write(tflite_model)
def validation():
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    images = tf.convert_to_tensor(np.expand_dims(x_test/255.0, -1),dtype=tf.float32)
    # Load the TFLite model in TFLite Interpreter
    interpreter = tf.lite.Interpreter(f"{target_dir}/tflite_model.tflite")
    # Model has single input.
    in_node = interpreter.get_input_details()[0]
    in_shape = in_node['shape']
    # Model has single output.
    out_node = interpreter.get_output_details()[0]
    out_shape = out_node['shape']
    # Resize Tensor (batch size)
    interpreter.resize_tensor_input(in_node['index'],[len(images), in_shape[1], in_shape[2], in_shape[3]])
    interpreter.resize_tensor_input(out_node['index'],[len(images), out_shape[1]])
    # Needed before execution!
    interpreter.allocate_tensors()
    interpreter.set_tensor(in_node['index'], images)
    interpreter.invoke()
    prediction = interpreter.get_tensor(out_node['index'])
    result = tf.argmax( prediction ,axis=1).numpy()
    print('accuracy={:.4f}'.format(np.sum(result == y_test)/y_test.shape[0]))
if __name__ == '__main__':
    convert_tflite()
validation() | 33.092593 | 110 | 0.691102 |
7946642c724f81e04e3091420010e35d5159f522 | 8,467 | py | Python | TxfmPlotWindow.py | dtauxe/hackcu-ad | 7870b741244f8f04cd87ae79896ef825425dcbd1 | [
"Apache-2.0"
] | null | null | null | TxfmPlotWindow.py | dtauxe/hackcu-ad | 7870b741244f8f04cd87ae79896ef825425dcbd1 | [
"Apache-2.0"
] | null | null | null | TxfmPlotWindow.py | dtauxe/hackcu-ad | 7870b741244f8f04cd87ae79896ef825425dcbd1 | [
"Apache-2.0"
] | null | null | null | # Window for plotting transform pairs
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import Qt
from TxfmPlotHelper import TxfmPlotHelper
from poleSeeker import poleSeeker
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as QMatFigCanvas
#from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as QMatNavToolbar
import matplotlib.pyplot as plt
# Class for the window
class TxfmPlotWindow (QMainWindow):
# Constructor
def __init__(self):
super().__init__()
self.initUI()
# Initialize window & widgets
def initUI(self):
# Soft-coded stuffs
frameStyle = QFrame.StyledPanel
# Function for drawing the graphs based on current results
def drawGraphs(results, txfm):
# Helper to try calling plotter
def tryPlot(canvas, expr, x):
try:
p = tph.plot(expr, x)
except Exception as e:
QMessageBox.critical(self, "Error", "Unable to plot " + str(expr))
print(e)
p = tph.plot("0", x)
self.plot(canvas, p)
if (txfm.startswith('Inv')): # Invert results
sExpr, tExpr = results
else:
tExpr, sExpr = results
if (txfm == 'Laplace'): # Cut out ROC info
result_roc = sExpr[1:]
sExpr = sExpr[0]
# Plot tExpr
tryPlot(sigCanvas, tExpr, 't')
if (txfm.endswith('Laplace') or txfm.endswith('Z')):
FtLabel.setText("Pole-Zero Plot")
sLabel.setText("S Domain")
# TODO draw pole-zero plot
else: # Fourier
FtLabel.setText("Fourier Transform")
sLabel.setText("w Domain")
# Plot sExpr
tryPlot(FtCanvas, "abs("+str(sExpr)+")", 'w')
# Function for updating the Properties
def updateProperties(results, txfm):
# Currently this has a whole lot TODO
if (txfm == 'Z'):
roc = poleSeeker(results[0])
rocLabel.setText("ROC: |z| "+('>' if (roc[1]) else '<')+" " + roc[0])
else:
rocLabel.setText("ROC: N/A")
tph = TxfmPlotHelper()
# Functions for performing the transforms
def doTransform():
self.statusBar().showMessage("Running")
txfm = txfmSelBox.currentText()
try:
result = tph.Transform(tEdit.text(), txfm)
except:
QMessageBox.critical(self, "Error", "Unable to transform "+tEdit.text())
result = tEdit.text(), [0]
# Deal with result
self.statusBar().showMessage("Plotting")
res_expr = result[1][0] if txfm == 'Laplace' else result[1]
sEdit.setText(str(res_expr))
drawGraphs(result, txfm)
updateProperties(result, txfm)
self.statusBar().showMessage("Ready")
def doInvTransform():
self.statusBar().showMessage("Running")
txfm = "Inv" + txfmSelBox.currentText()
try:
result = tph.Transform(sEdit.text(), txfm)
except:
QMessageBox.critical(self, "Error", "Unable to transform "+sEdit.text())
result = sEdit.text(), 0
# Deal with result
self.statusBar().showMessage("Plotting")
tEdit.setText(str(result[1]))
drawGraphs(result, txfm)
updateProperties(result, txfm)
self.statusBar().showMessage("Ready")
QToolTip.setFont(QFont('SansSerif', 10))
self.statusBar().showMessage("Ready")
#### Menubar ####
menubar = self.menuBar()
# Menus
fileMenu = menubar.addMenu('&File')
# Items
exitAct = QAction('E&xit', self)
#exitAct.setShortcut('Ctrl+Q')
exitAct.setStatusTip("Exit application")
exitAct.triggered.connect(qApp.quit)
fileMenu.addAction(exitAct)
# Matplotlib figures
tFigure = plt.figure()
sFigure = plt.figure()
#self.plot(tFigure)
#self.plot(sFigure)
# Input labels
tLabel = QLabel("Time Domain")
sLabel = QLabel("S Domain")
txfmLabel = QLabel("Transform")
selLabel = QLabel("Select Transform")
# Input widgets
tEdit = QLineEdit() # Time domain
sEdit = QLineEdit() # S domain
#tEdit.textChanged[str].connect(sEdit.setText)
t2sButton = QPushButton('⇓')
t2sButton.clicked.connect(doTransform)
tEdit.returnPressed.connect(doTransform)
s2tButton = QPushButton('⇑')
s2tButton.clicked.connect(doInvTransform)
sEdit.returnPressed.connect(doInvTransform)
txfmSelBox = QComboBox()
txfmSelBox.addItems(['Laplace', 'Fourier', 'Z'])
# Output labels
sigLabel = QLabel("Signal")
FtLabel = QLabel("Transform")
propLabel = QLabel("Properties")
# Output widgets
sigCanvas = QMatFigCanvas(tFigure)
FtCanvas = QMatFigCanvas(sFigure)
rocLabel = QLabel("ROC: N/A")
stableLabel = QLabel("Stable: N/A")
causalLabel = QLabel("Causal: N/A")
# Layout
LRSplitter = QSplitter(Qt.Horizontal)
# Left Column
gridL = QGridLayout()
gridL.setColumnStretch(1,1)
gridL.setColumnStretch(2,1)
LFrame = QFrame()
LFrame.setLayout(gridL)
LFrame.setFrameShape(frameStyle)
LRSplitter.addWidget(LFrame)
# Right Column
outSplitter = QSplitter(Qt.Vertical)
outSplitter.setFrameShape(frameStyle)
RFrame = QFrame()
#RFrame.setLayout(gridR)
sigFrame = QFrame()
sigGrid = QGridLayout()
sigFrame.setLayout(sigGrid)
sigFrame.setFrameShape(frameStyle)
FtFrame = QFrame()
FtGrid = QGridLayout()
FtFrame.setLayout(FtGrid)
outSplitter.addWidget(sigFrame)
outSplitter.addWidget(FtFrame)
LRSplitter.addWidget(outSplitter)
# Upper left
gridL.addWidget(tLabel, 0, 0)
gridL.addWidget(tEdit, 0, 1, 1, 2)
gridL.addWidget(txfmLabel, 1, 0)
gridL.addWidget(t2sButton, 1, 1)
gridL.addWidget(s2tButton, 1, 2)
gridL.addWidget(sLabel, 2, 0)
gridL.addWidget(sEdit, 2, 1, 1, 2)
gridL.addWidget(selLabel, 3, 0)
gridL.addWidget(txfmSelBox, 3, 1, 1, 2)
# Lower left
gridL.addWidget(QWidget(), 4, 0) # For spacing
gridL.setRowStretch(4,1)
gridL.addWidget(propLabel, 5, 0, 1, 3)
gridL.addWidget(rocLabel, 6, 0, 1, 3)
gridL.addWidget(stableLabel, 7, 0, 1, 3)
gridL.addWidget(causalLabel, 8, 0, 1, 3)
# Uppder Right
sigGrid.addWidget(sigLabel, 0, 0)
sigGrid.addWidget(sigCanvas, 1, 0, 3, 1)
# Lower Right
FtGrid.addWidget(FtLabel, 4, 0)
FtGrid.addWidget(FtCanvas, 5, 0, 3, 1)
self.setCentralWidget(LRSplitter)
# Window setup
#self.centralWidget().setLayout(grid)
#self.move(300, 300)
#self.resize(300, 220)
self.setWindowTitle("Transform Pair Plotter")
#self.setWindowIcon(QIcon("icon.png"))
self.show()
def plot(self, canvas, symplot):
# Helper func from https://stackoverflow.com/questions/46810880/display-two-sympy-plots-as-two-matplotlib-subplots
def move_symplot_to_axes(p, ax):
backend = p.backend(p)
backend.ax = ax
backend.process_series()
backend.ax.spines['right'].set_color('none')
backend.ax.spines['bottom'].set_position('zero')
backend.ax.spines['top'].set_color('none')
plt.close(backend.fig)
figure = canvas.figure
# instead of ax.hold(False)
figure.clear()
# create an axis
ax = figure.add_subplot(111)
# move data to plot
move_symplot_to_axes(symplot, ax)
#ax.plot(data, '*-')
# refresh canvas
canvas.draw()
## MAIN for testing
if __name__ == '__main__':
app = QApplication(sys.argv)
window = TxfmPlotWindow()
sys.exit(app.exec_())
| 33.733068 | 126 | 0.568088 |
7946643ec7f55932414f9c9964769a94e36b6d98 | 323 | py | Python | test/test_error.py | coord-e/lottery-backend | 4e1136ea62c471c98871ae28ad6b5144657151b4 | [
"MIT"
] | 3 | 2019-03-17T12:53:25.000Z | 2020-06-28T07:05:47.000Z | test/test_error.py | coord-e/lottery-backend | 4e1136ea62c471c98871ae28ad6b5144657151b4 | [
"MIT"
] | 297 | 2018-06-23T09:48:04.000Z | 2021-09-08T00:06:11.000Z | test/test_error.py | Sakuten/lottery-backend | 4e1136ea62c471c98871ae28ad6b5144657151b4 | [
"MIT"
] | 3 | 2019-03-07T15:38:19.000Z | 2019-03-30T08:00:14.000Z | from api.error import error_response
def test_error_response(client):
    with client.application.app_context():
        resp, http_code = error_response(0)
        resp = resp.get_json()
        assert http_code is not None
        assert 'message' in resp
        assert 'code' in resp
        assert resp['code'] == 0
| 26.916667 | 43 | 0.650155 |
79466682fd554ecfd18cb9c882be5af04d138c24 | 7,187 | py | Python | tests/test_freshroastsr700.py | trankin/freshroastsr700 | 85e2a353bb8e88dc04d9319b5476447620c37d1e | [
"MIT"
] | 12 | 2017-04-07T16:27:58.000Z | 2021-06-22T14:58:22.000Z | tests/test_freshroastsr700.py | trankin/freshroastsr700 | 85e2a353bb8e88dc04d9319b5476447620c37d1e | [
"MIT"
] | 23 | 2015-11-17T03:16:00.000Z | 2021-12-11T00:42:53.000Z | tests/test_freshroastsr700.py | trankin/freshroastsr700 | 85e2a353bb8e88dc04d9319b5476447620c37d1e | [
"MIT"
] | 12 | 2016-06-25T13:30:52.000Z | 2020-10-11T15:36:32.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 Mark Spicer
# Made available under the MIT license.
import unittest
import freshroastsr700
from freshroastsr700 import exceptions
class TestFreshroastsr700(unittest.TestCase):
def setUp(self):
self.roaster = freshroastsr700.freshroastsr700(thermostat=True)
def test_init_var_header(self):
self.assertEqual(self.roaster._header.value, b'\xAA\xAA')
def test_init_var_temp_unit(self):
self.assertEqual(self.roaster._temp_unit.value, b'\x61\x74')
def test_init_var_flags(self):
self.assertEqual(self.roaster._flags.value, b'\x63')
def test_init_var_current_state(self):
self.assertEqual(self.roaster._current_state.value, b'\x02\x01')
def test_init_var_footer(self):
self.assertEqual(self.roaster._footer, b'\xAA\xFA')
def test_init_var_fan_speed(self):
self.assertEqual(self.roaster._fan_speed.value, 1)
def test_init_var_heat_setting(self):
self.assertEqual(self.roaster._heat_setting.value, 0)
def test_init_var_time_remaining(self):
self.assertEqual(self.roaster.time_remaining, 0)
def test_init_var_current_temp(self):
self.assertEqual(self.roaster.current_temp, 150)
def test_generate_packet(self):
packet = self.roaster._generate_packet()
self.assertEqual(
packet, b'\xaa\xaaatc\x02\x01\x01\x00\x00\x00\x00\xaa\xfa')
def test_idle(self):
self.roaster.idle()
self.assertEqual(self.roaster._current_state.value, b'\x02\x01')
def test_roast(self):
self.roaster.roast()
self.assertEqual(self.roaster._current_state.value, b'\x04\x02')
def test_cool(self):
self.roaster.cool()
self.assertEqual(self.roaster._current_state.value, b'\x04\x04')
def test_sleep(self):
self.roaster.sleep()
self.assertEqual(self.roaster._current_state.value, b'\x08\x01')
def test_getting_var_fan_speed(self):
self.assertEqual(self.roaster.fan_speed, 1)
def test_setting_var_fan_speed_high(self):
with self.assertRaises(exceptions.RoasterValueError):
self.roaster.fan_speed = 10
def test_setting_var_fan_speed_low(self):
with self.assertRaises(exceptions.RoasterValueError):
self.roaster.fan_speed = 0
def test_setting_var_fan_speed_incorrect(self):
with self.assertRaises(exceptions.RoasterValueError):
self.roaster.fan_speed = 'w'
def test_seting_var_fan_speed_correct(self):
self.roaster.fan_speed = 6
self.assertEqual(self.roaster.fan_speed, 6)
def test_getting_time_remaining(self):
self.assertEqual(self.roaster.time_remaining, 0)
def test_getting_heat_setting(self):
self.assertEqual(self.roaster.heat_setting, 0)
def test_setting_var_heat_setting_high(self):
with self.assertRaises(exceptions.RoasterValueError):
self.roaster.heat_setting = 4
def test_setting_var_heat_setting_low(self):
with self.assertRaises(exceptions.RoasterValueError):
self.roaster.heat_setting = -1
def test_setting_var_heat_setting_incorrect(self):
with self.assertRaises(exceptions.RoasterValueError):
self.roaster.heat_setting = 'w'
def test_setting_var_heat_setting_correct(self):
self.roaster.heat_setting = 3
self.assertEqual(self.roaster.heat_setting, 3)
def test_disconnect(self):
self.roaster.disconnect()
self.assertTrue(self.roaster._disconnect.value)
def test_get_roaster_state_roasting(self):
self.roaster._current_state.value = b'\x04\x02'
self.assertEqual('roasting', self.roaster.get_roaster_state())
def test_get_roaster_state_cooling(self):
self.roaster._current_state.value = b'\x04\x04'
self.assertEqual('cooling', self.roaster.get_roaster_state())
def test_get_roaster_state_idle(self):
self.roaster._current_state.value = b'\x02\x01'
self.assertEqual('idle', self.roaster.get_roaster_state())
def test_get_roaster_state_sleeping(self):
self.roaster._current_state.value = b'\x08\x01'
self.assertEqual('sleeping', self.roaster.get_roaster_state())
def test_get_roaster_state_connecting(self):
self.roaster._current_state.value = b'\x00\x00'
self.assertEqual('connecting', self.roaster.get_roaster_state())
def test_get_roaster_state_uknown(self):
self.roaster._current_state.value = b'\x13\x41'
self.assertEqual('unknown', self.roaster.get_roaster_state())
def test_heat_controller_4_segment_output(self):
heater = freshroastsr700.heat_controller(number_of_segments=4)
heater.heat_level = 0
self.assertFalse(heater.about_to_rollover())
self.assertFalse(heater.generate_bangbang_output())
self.assertFalse(heater.about_to_rollover())
self.assertFalse(heater.generate_bangbang_output())
self.assertFalse(heater.about_to_rollover())
self.assertFalse(heater.generate_bangbang_output())
self.assertFalse(heater.about_to_rollover())
self.assertFalse(heater.generate_bangbang_output())
self.assertTrue(heater.about_to_rollover())
heater.heat_level = 1
self.assertTrue(heater.generate_bangbang_output())
self.assertFalse(heater.about_to_rollover())
self.assertFalse(heater.generate_bangbang_output())
self.assertFalse(heater.about_to_rollover())
self.assertFalse(heater.generate_bangbang_output())
self.assertFalse(heater.about_to_rollover())
self.assertFalse(heater.generate_bangbang_output())
self.assertTrue(heater.about_to_rollover())
heater.heat_level = 2
self.assertTrue(heater.generate_bangbang_output())
self.assertFalse(heater.about_to_rollover())
self.assertFalse(heater.generate_bangbang_output())
self.assertFalse(heater.about_to_rollover())
self.assertTrue(heater.generate_bangbang_output())
self.assertFalse(heater.about_to_rollover())
self.assertFalse(heater.generate_bangbang_output())
self.assertTrue(heater.about_to_rollover())
heater.heat_level = 3
self.assertTrue(heater.generate_bangbang_output())
self.assertFalse(heater.about_to_rollover())
self.assertTrue(heater.generate_bangbang_output())
self.assertFalse(heater.about_to_rollover())
self.assertTrue(heater.generate_bangbang_output())
self.assertFalse(heater.about_to_rollover())
self.assertFalse(heater.generate_bangbang_output())
self.assertTrue(heater.about_to_rollover())
heater.heat_level = 4
self.assertTrue(heater.generate_bangbang_output())
self.assertFalse(heater.about_to_rollover())
self.assertTrue(heater.generate_bangbang_output())
self.assertFalse(heater.about_to_rollover())
self.assertTrue(heater.generate_bangbang_output())
self.assertFalse(heater.about_to_rollover())
self.assertTrue(heater.generate_bangbang_output())
self.assertTrue(heater.about_to_rollover())
| 39.927778 | 72 | 0.716572 |
794667005b4f96118d524d96e9d320d902e4928f | 1,842 | py | Python | pywavefront/exceptions.py | RoyJames/PyWavefront | 7f5e713f935e5ff109de8a0cd2f3eaba04e9722c | [
"BSD-3-Clause"
] | 139 | 2019-02-25T11:42:05.000Z | 2022-03-17T18:49:27.000Z | pywavefront/exceptions.py | RoyJames/PyWavefront | 7f5e713f935e5ff109de8a0cd2f3eaba04e9722c | [
"BSD-3-Clause"
] | 72 | 2016-01-22T14:00:31.000Z | 2019-02-22T16:47:15.000Z | pywavefront/exceptions.py | RoyJames/PyWavefront | 7f5e713f935e5ff109de8a0cd2f3eaba04e9722c | [
"BSD-3-Clause"
] | 42 | 2015-07-29T16:01:57.000Z | 2019-01-15T09:17:40.000Z | # ----------------------------------------------------------------------------
# PyWavefront
# Copyright (c) 2018 Kurt Yoder
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of PyWavefront nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
class PywavefrontException(Exception):
"""Generic exception for this package to separate from common ones"""
pass
| 47.230769 | 78 | 0.70684 |
79466732dead6421793b2369c0a1ecd9d1e1cfa4 | 7,719 | py | Python | custom/ilsgateway/tanzania/reports/facility_details.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2020-05-05T13:10:01.000Z | 2020-05-05T13:10:01.000Z | custom/ilsgateway/tanzania/reports/facility_details.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2019-12-09T14:00:14.000Z | 2019-12-09T14:00:14.000Z | custom/ilsgateway/tanzania/reports/facility_details.py | MaciejChoromanski/commcare-hq | fd7f65362d56d73b75a2c20d2afeabbc70876867 | [
"BSD-3-Clause"
] | 5 | 2015-11-30T13:12:45.000Z | 2019-07-01T19:27:07.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from corehq.apps.commtrack.models import StockState
from corehq.apps.locations.dbaccessors import get_user_docs_by_location
from corehq.apps.locations.models import SQLLocation
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.sms.models import SMS, OUTGOING
from corehq.util.timezones.conversions import ServerTime
from corehq.const import SERVER_DATETIME_FORMAT_NO_SEC
from custom.ilsgateway.models import SupplyPointStatusTypes, ILSNotes
from custom.ilsgateway.tanzania import ILSData, MultiReport
from custom.ilsgateway.tanzania.reports.stock_on_hand import StockOnHandReport
from custom.ilsgateway.tanzania.reports.utils import decimal_format, float_format, latest_status
from memoized import memoized
from django.utils.translation import ugettext as _
class InventoryHistoryData(ILSData):
title = 'Inventory History'
slug = 'inventory_history'
show_chart = False
show_table = True
default_rows = 100
@property
def headers(self):
headers = DataTablesHeader(
DataTablesColumn(_('Product')),
DataTablesColumn(_('Stock on Hand')),
DataTablesColumn(_('Months of stock'))
)
return headers
@property
def rows(self):
rows = []
if self.config['location_id']:
sp = SQLLocation.objects.get(location_id=self.config['location_id']).supply_point_id
ss = StockState.objects.filter(
sql_product__is_archived=False,
case_id=sp,
product_id__in=self.config['products']
)
for stock in ss:
def calculate_months_remaining(stock_state, quantity):
consumption = stock_state.get_monthly_consumption()
if consumption is not None and consumption > 0 and quantity is not None:
return float(quantity) / float(consumption)
elif quantity == 0:
return 0
return None
rows.append([stock.sql_product.name, decimal_format(stock.stock_on_hand),
float_format(calculate_months_remaining(stock, stock.stock_on_hand))])
return rows
class RegistrationData(ILSData):
show_chart = False
show_table = True
searchable = True
@property
def title(self):
return '%s Contacts' % self.config['loc_type']
@property
def slug(self):
return '%s_registration' % self.config['loc_type'].lower()
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn(_('Name')),
DataTablesColumn(_('Role')),
DataTablesColumn(_('Phone')),
DataTablesColumn(_('Email')),
)
@property
def rows(self):
location = SQLLocation.objects.get(location_id=self.config['location_id'])
if self.config['loc_type'] == 'DISTRICT':
location = location.parent
elif self.config['loc_type'] == 'REGION':
location = location.parent.parent
users = get_user_docs_by_location(self.config['domain'], location.location_id)
if users:
for user in users:
u = user['doc']
yield [
'{0} {1}'.format(u['first_name'], u['last_name']),
u['user_data']['role'] if 'role' in u['user_data'] else 'No Role',
u['phone_numbers'][0] if u['phone_numbers'] else '',
u['email']
]
class RandRHistory(ILSData):
slug = 'randr_history'
title = 'R & R History'
show_chart = False
show_table = True
@property
def rows(self):
return latest_status(self.config['location_id'], SupplyPointStatusTypes.R_AND_R_FACILITY)
class Notes(ILSData):
slug = 'ils_notes'
title = 'Notes'
show_chart = False
show_table = True
use_datatables = True
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn(_('Name')),
DataTablesColumn(_('Role')),
DataTablesColumn(_('Date')),
DataTablesColumn(_('Phone')),
DataTablesColumn(_('Text'))
)
@property
def rows(self):
location = SQLLocation.objects.get(location_id=self.config['location_id'])
rows = ILSNotes.objects.filter(domain=self.config['domain'], location=location).order_by('date')
for row in rows:
yield [
row.user_name,
row.user_role,
row.date.strftime(SERVER_DATETIME_FORMAT_NO_SEC),
row.user_phone,
row.text
]
def _fmt_timestamp(timestamp):
return dict(
sort_key=timestamp,
html=timestamp.strftime("%Y-%m-%d %H:%M:%S"),
)
def _fmt(val):
if val is None:
val = '-'
return dict(sort_key=val, html=val)
class RecentMessages(ILSData):
slug = 'recent_messages'
title = 'Recent messages'
show_chart = False
show_table = True
default_rows = 5
use_datatables = True
def __init__(self, config=None):
super(RecentMessages, self).__init__(config, 'row_chart_all')
@property
def headers(self):
header = DataTablesHeader(
DataTablesColumn('Date'),
DataTablesColumn('User'),
DataTablesColumn('Phone number'),
DataTablesColumn('Direction'),
DataTablesColumn('Text')
)
header.custom_sort = [[0, 'desc']]
return header
@property
def rows(self):
data = (SMS.by_domain(self.config['domain'])
.filter(location_id=self.config['location_id'])
.exclude(processed=False, direction=OUTGOING)
.order_by('-date'))
messages = []
for message in data:
recipient = message.recipient
timestamp = ServerTime(message.date).user_time(self.config['timezone']).done()
messages.append([
_fmt_timestamp(timestamp),
recipient.full_name,
message.phone_number,
_fmt(message.direction),
_fmt(message.text),
])
return messages
class FacilityDetailsReport(MultiReport):
fields = []
hide_filters = True
name = "Facility Details"
slug = 'facility_details'
use_datatables = True
parent_report_class = StockOnHandReport
@property
def title(self):
if self.location and self.location.location_type.name.upper() == 'FACILITY':
return "{0} ({1}) Group {2}".format(self.location.name,
self.location.site_code,
self.location.metadata.get('group', '---'))
return 'Facility Details Report'
@classmethod
def show_in_navigation(cls, domain=None, project=None, user=None):
return False
@property
@memoized
def data_providers(self):
config = self.report_config
return [
InventoryHistoryData(config=config),
RandRHistory(config=config),
Notes(config=config),
RecentMessages(config=config),
RegistrationData(config=dict(loc_type='FACILITY', **config), css_class='row_chart_all'),
RegistrationData(config=dict(loc_type='DISTRICT', **config), css_class='row_chart_all'),
RegistrationData(config=dict(loc_type='REGION', **config), css_class='row_chart_all')
]
| 32.987179 | 104 | 0.609794 |
7946674b69742ef5e4ec7f1624dc932023a07153 | 98 | py | Python | flusher/flusher/cli.py | LeastAuthority/bandchain | 68814332e458767fe09a2856f43df6ce0545713e | [
"Apache-2.0"
] | 1 | 2021-06-29T04:20:50.000Z | 2021-06-29T04:20:50.000Z | flusher/flusher/cli.py | LeastAuthority/bandchain | 68814332e458767fe09a2856f43df6ce0545713e | [
"Apache-2.0"
] | 2 | 2020-07-07T12:40:17.000Z | 2020-07-20T01:51:55.000Z | flusher/flusher/cli.py | LeastAuthority/bandchain | 68814332e458767fe09a2856f43df6ce0545713e | [
"Apache-2.0"
] | null | null | null | import click
@click.group()
def cli():
"""BandChain's flusher utility program."""
pass
| 10.888889 | 46 | 0.632653 |
7946679a43e7677634a08ad38adb945548628d8a | 4,054 | py | Python | src/pretalx/common/views.py | MaxRink/pretalx | 6f0e944ba1721b0d665b180407b40a558940aca6 | [
"Apache-2.0"
] | null | null | null | src/pretalx/common/views.py | MaxRink/pretalx | 6f0e944ba1721b0d665b180407b40a558940aca6 | [
"Apache-2.0"
] | null | null | null | src/pretalx/common/views.py | MaxRink/pretalx | 6f0e944ba1721b0d665b180407b40a558940aca6 | [
"Apache-2.0"
] | null | null | null | import os.path
import urllib
from contextlib import suppress
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import login
from django.http import FileResponse, Http404
from django.shortcuts import redirect
from django.utils.http import is_safe_url
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.views.generic import FormView
from django.views.generic.detail import SingleObjectTemplateResponseMixin
from django.views.generic.edit import ModelFormMixin, ProcessFormView
from pretalx.cfp.forms.auth import ResetForm
from pretalx.common.mail import SendMailException
from pretalx.common.phrases import phrases
from pretalx.person.forms import UserForm
from pretalx.person.models import User
class CreateOrUpdateView(
    SingleObjectTemplateResponseMixin, ModelFormMixin, ProcessFormView
):
    def set_object(self):
        if getattr(self, 'object', None) is None:
            setattr(self, 'object', None)
            with suppress(self.model.DoesNotExist, AttributeError):
                setattr(self, 'object', self.get_object())
    def get(self, request, *args, **kwargs):
        self.set_object()
        return super().get(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        self.set_object()
        return super().post(request, *args, **kwargs)
def is_form_bound(request, form_name, form_param='form'):
    return request.method == 'POST' and request.POST.get(form_param) == form_name
def get_static(request, path, content_type):
    """TODO: move to staticfiles usage as per https://gist.github.com/SmileyChris/8d472f2a67526e36f39f3c33520182bc
    This would avoid potential directory traversal by … a malicious urlconfig, so not a huge attack vector."""
    path = os.path.join(settings.BASE_DIR, 'pretalx/static', path)
    if not os.path.exists(path):
        raise Http404()
    return FileResponse(open(path, 'rb'), content_type=content_type, as_attachment=False)
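def _get_static_via_staticfiles(request, path, content_type):
    """Editor's sketch of the staticfiles-based variant hinted at by the TODO in
    get_static() above; this is an illustration, not pretalx's actual code.
    django.contrib.staticfiles' finders.find() resolves a relative path against
    the configured static directories and returns None when nothing matches."""
    from django.contrib.staticfiles import finders
    absolute_path = finders.find(path)
    if not absolute_path:
        raise Http404()
    return FileResponse(open(absolute_path, 'rb'), content_type=content_type, as_attachment=False)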
class GenericLoginView(FormView):
    form_class = UserForm
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['password_reset_link'] = self.get_password_reset_link()
        return context
    def form_valid(self, form):
        pk = form.save()
        user = User.objects.filter(pk=pk).first()
        if not user:
            messages.error(
                self.request,
                _(
                    'There was an error when logging in. Please contact the organiser for further help.'
                ),
            )
            return redirect(self.get_error_url())
        if not user.is_active:
            messages.error(self.request, _('User account is deactivated.'))
            return redirect(self.get_error_redirect())
        login(self.request, user, backend='django.contrib.auth.backends.ModelBackend')
        params = self.request.GET.copy()
        url = urllib.parse.unquote(params.pop('next', [''])[0])
        if url and is_safe_url(url, allowed_hosts=None):
            return redirect(url + ('?' + params.urlencode() if params else ''))
        return redirect(self.get_success_url())
class GenericResetView(FormView):
    form_class = ResetForm
    def form_valid(self, form):
        user = form.cleaned_data['user']
        if not user or (
            user.pw_reset_time
            and (now() - user.pw_reset_time).total_seconds() < 3600 * 24
        ):
            messages.success(self.request, phrases.cfp.auth_password_reset)
            return redirect(self.get_success_url())
        try:
            user.reset_password(event=getattr(self.request, 'event', None))
        except SendMailException:
            messages.error(self.request, phrases.base.error_sending_mail)
            return self.get(self.request, *self.args, **self.kwargs)
        messages.success(self.request, phrases.cfp.auth_password_reset)
        user.log_action('pretalx.user.password.reset')
        return redirect(self.get_success_url())
| 36.522523 | 114 | 0.686976 |
7946681c51648b1074aae3f3ad3baf1458243a65 | 2,306 | py | Python | tools/Polygraphy/polygraphy/backend/pyt/runner.py | 5had3z/TensorRT | 8561894f7373c5c87a0b67c9a8661b345971aa09 | [
"Apache-2.0"
] | 4 | 2021-04-16T13:49:38.000Z | 2022-01-16T08:58:07.000Z | tools/Polygraphy/polygraphy/backend/pyt/runner.py | 5had3z/TensorRT | 8561894f7373c5c87a0b67c9a8661b345971aa09 | [
"Apache-2.0"
] | null | null | null | tools/Polygraphy/polygraphy/backend/pyt/runner.py | 5had3z/TensorRT | 8561894f7373c5c87a0b67c9a8661b345971aa09 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# time and OrderedDict are required by PytRunner.infer() below
import time
from collections import OrderedDict
import torch
from polygraphy.backend.base import BaseRunner
from polygraphy.util import misc
class PytRunner(BaseRunner):
    def __init__(self, model, input_metadata, output_names, name=None):
        """
        Args:
            model (Callable() -> torch.nn.Module):
                A model loader that returns a torch.nn.Module or subclass.
            input_metadata (TensorMetadata): Mapping of input names to their data types and shapes.
            output_names (List[str]):
                A list of output names of the model. This information is used by the
                Comparator to determine which outputs to compare.
            name (str):
                The human-readable name prefix to use for this runner.
                A runner count and timestamp will be appended to this prefix.
        """
        super().__init__(name=name, prefix="pytorch-runner")
        self._model = model
        self.input_metadata = input_metadata
        self.output_names = output_names
    def activate_impl(self):
        self.model, _ = misc.try_call(self._model)
        self.model.eval()
    def infer(self, feed_dict):
        with torch.no_grad():
            inputs = [torch.from_numpy(val.astype(dtype)).cuda() for (val, (dtype, _)) in zip(feed_dict.values(), self.input_metadata.values())]
            start = time.time()
            outputs = self.model(*inputs)
            end = time.time()
            out_dict = OrderedDict()
            for name, output in zip(self.output_names, outputs):
                out_dict[name] = output.cpu().numpy()
        return out_dict, end - start
    def get_input_metadata(self):
        return self.input_metadata
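# Editor's sketch (not part of the original file): typical use of PytRunner.
# The TensorMetadata import path, the toy model, and the exact TensorMetadata.add()
# signature are assumptions for illustration only; infer() moves inputs to the GPU,
# so a CUDA device is required.
#
#   import numpy as np
#   from polygraphy.common import TensorMetadata
#
#   meta = TensorMetadata().add("x", dtype=np.float32, shape=(1, 4))
#   runner = PytRunner(lambda: torch.nn.Linear(4, 2), meta, output_names=["y"])
#   with runner:
#       outputs, inference_time = runner.infer({"x": np.ones((1, 4), dtype=np.float32)})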
| 36.603175 | 144 | 0.651344 |
7946684f199be9c500036d85a3e981c4b3cc5b54 | 553 | py | Python | nbkode/testsuite/__init__.py | Yash-10/numbakit-ode | aa5a0f417a2218bd471db754b35cc61996b2461e | [
"BSD-3-Clause"
] | 37 | 2020-11-07T08:53:49.000Z | 2021-12-24T00:01:16.000Z | nbkode/testsuite/__init__.py | Yash-10/numbakit-ode | aa5a0f417a2218bd471db754b35cc61996b2461e | [
"BSD-3-Clause"
] | 24 | 2020-11-04T02:05:28.000Z | 2022-03-28T21:14:08.000Z | nbkode/testsuite/__init__.py | Yash-10/numbakit-ode | aa5a0f417a2218bd471db754b35cc61996b2461e | [
"BSD-3-Clause"
] | 4 | 2020-12-24T09:19:50.000Z | 2022-03-04T16:45:49.000Z | """
nbkode.testsuite
~~~~~~~~~~~~~~~~
numbakit-ode (nbkode) is a Python package to solve
**ordinary differential equations (ODE)** that uses
numba to compile code and therefore speed up calculations.
:copyright: 2020 by nbkode Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
def run():
"""Run all tests."""
try:
import pytest
except ImportError:
print("pytest not installed. Install it\n pip install pytest")
raise
return pytest.main()
| 23.041667 | 73 | 0.631103 |
794668844cba4edba1c2cca522fe26e29ee1021e | 695 | py | Python | mar-crawlers/common/crawler_common.py | chema0/mar | a1ed70744603d1f9c838042054537849745b1c31 | [
"Apache-2.0"
] | 2 | 2021-06-22T13:59:25.000Z | 2022-01-29T18:53:26.000Z | mar-crawlers/common/crawler_common.py | chema0/mar | a1ed70744603d1f9c838042054537849745b1c31 | [
"Apache-2.0"
] | 8 | 2021-05-03T13:36:30.000Z | 2022-02-07T08:35:40.000Z | mar-crawlers/common/crawler_common.py | chema0/mar | a1ed70744603d1f9c838042054537849745b1c31 | [
"Apache-2.0"
] | 1 | 2022-01-27T10:00:23.000Z | 2022-01-27T10:00:23.000Z | import os
import sqlite3
def model_already_exists(model_id, c):
    c.execute('SELECT model_id FROM data WHERE model_id = ?', [model_id])
    value = c.fetchone()
    return value is not None
def open_db(output_folder, database_name, smash=False):
    db = os.path.join(output_folder, database_name)
    if smash and os.path.exists(db):
        os.remove(db)
    conn = sqlite3.connect(db)
    c = conn.cursor()
    schema = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../schema/crawlerdb.sql')
    with open(schema, 'r') as file:
        stms = file.read().split(";")
        for stm in stms:
            c.execute(stm)
    conn.commit()
    return conn
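def _example_usage(tmp_folder="/tmp"):
    """Editor's sketch (not part of the original module): typical use of the two
    helpers above. Assumes ../schema/crawlerdb.sql exists next to this package
    and that it creates the `data` table queried by model_already_exists()."""
    conn = open_db(tmp_folder, "crawler_example.db", smash=True)
    cursor = conn.cursor()
    already_there = model_already_exists("some-model-id", cursor)
    conn.close()
    return already_there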
| 26.730769 | 96 | 0.627338 |
794668cfb9071cf09d37fd9222b275cd3d1245c8 | 472 | py | Python | mayan/apps/appearance/migrations/0005_theme_font_other.py | TheerapatLoinok/Mayan-EDMS | dc06cc7a57aa2fe6ea97575d0196df4950e63975 | [
"Apache-2.0"
] | null | null | null | mayan/apps/appearance/migrations/0005_theme_font_other.py | TheerapatLoinok/Mayan-EDMS | dc06cc7a57aa2fe6ea97575d0196df4950e63975 | [
"Apache-2.0"
] | null | null | null | mayan/apps/appearance/migrations/0005_theme_font_other.py | TheerapatLoinok/Mayan-EDMS | dc06cc7a57aa2fe6ea97575d0196df4950e63975 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.24 on 2022-03-20 09:56
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('appearance', '0004_auto_20220320_0950'),
    ]
    operations = [
        migrations.AddField(
            model_name='theme',
            name='font_other',
            field=models.CharField(blank=True, help_text='Name font from Google Font.', max_length=300, verbose_name='Font other'),
        ),
    ]
| 24.842105 | 131 | 0.631356 |
79466977235c674fdc21cfda2762af2723e5757d | 10,544 | py | Python | build_tools/bazel_to_cmake/bazel_to_cmake_targets.py | yzhang93/iree | 382fc631b663dee0b5bb60c617525e97880d5ece | [
"Apache-2.0"
] | null | null | null | build_tools/bazel_to_cmake/bazel_to_cmake_targets.py | yzhang93/iree | 382fc631b663dee0b5bb60c617525e97880d5ece | [
"Apache-2.0"
] | null | null | null | build_tools/bazel_to_cmake/bazel_to_cmake_targets.py | yzhang93/iree | 382fc631b663dee0b5bb60c617525e97880d5ece | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import re
# Bazel to CMake target name conversions used by bazel_to_cmake.py.
EXPLICIT_TARGET_MAPPING = {
# Internal utilities to emulate various binary/library options.
"//build_tools:default_linkopts": [],
"//build_tools:dl": ["${CMAKE_DL_LIBS}"],
"//compiler/src:defs": [],
"//compiler/src/iree/compiler/API:CAPI": ["IREECompilerCAPILib"],
"//runtime/src:runtime_defines": [],
# IREE llvm-external-projects
"//llvm-external-projects/iree-dialects:IREEPyDMTransforms": [
"IREEPyDMPasses"
],
# Disable all hard-coded codegen targets (they are expanded dynamically
# in CMake).
"@llvm-project//llvm:AArch64AsmParser": ["IREELLVMCPUTargetDeps"],
"@llvm-project//llvm:AArch64CodeGen": ["IREELLVMCPUTargetDeps"],
"@llvm-project//llvm:ARMAsmParser": ["IREELLVMCPUTargetDeps"],
"@llvm-project//llvm:ARMCodeGen": ["IREELLVMCPUTargetDeps"],
"@llvm-project//llvm:RISCVAsmParser": ["IREELLVMCPUTargetDeps"],
"@llvm-project//llvm:RISCVCodeGen": ["IREELLVMCPUTargetDeps"],
"@llvm-project//llvm:WebAssemblyAsmParser": ["IREELLVMCPUTargetDeps"],
"@llvm-project//llvm:WebAssemblyCodeGen": ["IREELLVMCPUTargetDeps"],
"@llvm-project//llvm:X86AsmParser": ["IREELLVMCPUTargetDeps"],
"@llvm-project//llvm:X86CodeGen": ["IREELLVMCPUTargetDeps"],
# LLVM
"@llvm-project//llvm:config": [],
"@llvm-project//llvm:IPO": ["LLVMipo"],
"@llvm-project//lld": ["${IREE_LLD_TARGET}"],
"@llvm-project//llvm:FileCheck": ["FileCheck"],
# MLIR
"@llvm-project//mlir:AllPassesAndDialects": ["MLIRAllDialects"],
"@llvm-project//mlir:AffineToStandardTransforms": ["MLIRAffineToStandard"],
"@llvm-project//mlir:ControlFlowOps": ["MLIRControlFlow"],
"@llvm-project//mlir:CFGTransforms": ["MLIRSCFToControlFlow"],
"@llvm-project//mlir:ComplexDialect": ["MLIRComplex"],
"@llvm-project//mlir:DialectUtils": [""],
"@llvm-project//mlir:GPUDialect": ["MLIRGPUOps"],
"@llvm-project//mlir:GPUTransforms": ["MLIRGPUTransforms"],
"@llvm-project//mlir:LinalgInterfaces": ["MLIRLinalg"],
"@llvm-project//mlir:LinalgStructuredOpsIncGen": [
"MLIRLinalgStructuredOpsIncGenLib"
],
"@llvm-project//mlir:LinalgOps": ["MLIRLinalg"],
"@llvm-project//mlir:LLVMDialect": ["MLIRLLVMIR"],
"@llvm-project//mlir:LLVMTransforms": ["MLIRFuncToLLVM"],
"@llvm-project//mlir:MathDialect": ["MLIRMath"],
"@llvm-project//mlir:ArithmeticDialect": ["MLIRArithmetic"],
"@llvm-project//mlir:BufferizationDialect": ["MLIRBufferization"],
"@llvm-project//mlir:MemRefDialect": ["MLIRMemRef"],
"@llvm-project//mlir:SCFToGPUPass": ["MLIRSCFToGPU"],
"@llvm-project//mlir:SCFDialect": ["MLIRSCF"],
"@llvm-project//mlir:FuncDialect": ["MLIRFunc"],
"@llvm-project//mlir:ShapeTransforms": ["MLIRShapeOpsTransforms"],
"@llvm-project//mlir:SideEffects": ["MLIRSideEffectInterfaces"],
"@llvm-project//mlir:SPIRVDialect": ["MLIRSPIRV"],
"@llvm-project//mlir:TosaDialect": ["MLIRTosa"],
"@llvm-project//mlir:ToLLVMIRTranslation": ["MLIRTargetLLVMIRExport"],
"@llvm-project//mlir:mlir-translate": ["mlir-translate"],
"@llvm-project//mlir:MlirTableGenMain": ["MLIRTableGen"],
"@llvm-project//mlir:MlirOptLib": ["MLIROptLib"],
"@llvm-project//mlir:Translation": ["MLIRTranslateLib"],
"@llvm-project//mlir:VectorOps": ["MLIRVector"],
"@llvm-project//mlir:TensorDialect": ["MLIRTensor"],
"@llvm-project//mlir:NVVMDialect": ["MLIRNVVMIR"],
"@llvm-project//mlir:ROCDLDialect": ["MLIRROCDLIR"],
"@llvm-project//mlir:PDLDialect": ["MLIRPDL"],
"@llvm-project//mlir:PDLInterpDialect": ["MLIRPDLInterp"],
# MHLO.
# TODO: Rework this upstream so that Bazel and CMake rules match up
# better.
# All of these have to depend on tensorflow::external_mhlo_includes to
# ensure that include directories are inherited.
"@mlir-hlo//:chlo_legalize_to_hlo": [
"tensorflow::external_mhlo_includes",
"ChloPasses",
],
"@mlir-hlo//:hlo": [
"tensorflow::external_mhlo_includes",
"ChloDialect",
"MhloDialect",
"MLIRMhloUtils",
],
"@mlir-hlo//:hlo_legalize_shape_ops_to_standard": [
"tensorflow::external_mhlo_includes",
"MhloShapeOpsToStandard",
],
"@mlir-hlo//:hlo_legalize_to_arithmetic": [
"tensorflow::external_mhlo_includes",
"MhloToArithmeticConversion",
],
"@mlir-hlo//:hlo_legalize_to_lhlo": [
"tensorflow::external_mhlo_includes",
"MhloToLhloConversion",
],
"@mlir-hlo//:hlo_legalize_to_memref": [
"tensorflow::external_mhlo_includes",
"MhloToMemrefConversion",
],
"@mlir-hlo//:legalize_control_flow": [
"tensorflow::external_mhlo_includes",
"MhloToStandard",
],
"@mlir-hlo//:legalize_einsum_to_dot_general": [
"tensorflow::external_mhlo_includes",
"MhloPasses",
],
"@mlir-hlo//:legalize_gather_to_torch_index_select": [
"tensorflow::external_mhlo_includes",
"MhloPasses",
],
"@mlir-hlo//:legalize_to_linalg": [
"tensorflow::external_mhlo_includes",
"MhloToLinalg",
],
"@mlir-hlo//:legalize_to_standard": [
"tensorflow::external_mhlo_includes",
"MhloToStandard",
],
"@mlir-hlo//:map_lmhlo_to_scalar_op": [
"tensorflow::external_mhlo_includes",
"LmhloDialect", # Unfortunate.
"MhloDialect",
],
"@mlir-hlo//:map_mhlo_to_scalar_op": [
"tensorflow::external_mhlo_includes",
"MhloDialect",
],
"@mlir-hlo//:materialize_broadcasts": [
"tensorflow::external_mhlo_includes",
"MhloPasses",
],
"@mlir-hlo//:mhlo_control_flow_to_scf": [
"tensorflow::external_mhlo_includes",
"MhloToStandard",
],
"@mlir-hlo//:mhlo_to_mhlo_lowering_patterns": [
"tensorflow::external_mhlo_includes",
"MhloPasses",
],
"@mlir-hlo//:unfuse_batch_norm": [
"tensorflow::external_mhlo_includes",
"MhloPasses",
],
# Torch-MLIR.
"@torch-mlir-dialects//:TorchMLIRTMTensorDialect": [
"TorchMLIRTMTensorDialect"
],
# Vulkan
"@vulkan_headers": ["Vulkan::Headers"],
# The Bazel target maps to the IMPORTED target defined by FindVulkan().
"@vulkan_sdk//:sdk": ["Vulkan::Vulkan"],
# Misc single targets
"@com_google_benchmark//:benchmark": ["benchmark"],
"@com_github_dvidelabs_flatcc//:flatcc": ["flatcc"],
"@com_github_dvidelabs_flatcc//:parsing": ["flatcc::parsing"],
"@com_github_dvidelabs_flatcc//:runtime": ["flatcc::runtime"],
"@com_github_yaml_libyaml//:yaml": ["yaml"],
"@com_google_googletest//:gtest": ["gmock", "gtest"],
"@spirv_cross//:spirv_cross_lib": ["spirv-cross-msl"],
"@cpuinfo": ["${IREE_CPUINFO_TARGET}"],
"@vulkan_memory_allocator//:impl_header_only": ["vulkan_memory_allocator"],
}
def _convert_mlir_target(target):
# Default to a pattern substitution approach.
# Take "MLIR" and append the name part of the full target identifier, e.g.
# "@llvm-project//mlir:IR" -> "MLIRIR"
# "@llvm-project//mlir:Pass" -> "MLIRPass"
return ["MLIR" + target.rsplit(":")[-1]]
def _convert_llvm_target(target):
# Default to a pattern substitution approach.
# Prepend "LLVM" to the Bazel target name.
# "@llvm-project//llvm:AsmParser" -> "LLVMAsmParser"
# "@llvm-project//llvm:Core" -> "LLVMCore"
return ["LLVM" + target.rsplit(":")[-1]]
def _convert_iree_dialects_target(target):
# Just take the target name as-is.
return [target.rsplit(":")[-1]]
def _convert_to_cmake_path(bazel_path_fragment: str) -> str:
cmake_path = bazel_path_fragment
# Bazel `//iree/base` -> CMake `iree::base`
# Bazel `//iree/base:foo` -> CMake `iree::base::foo`
if cmake_path.startswith("//"):
cmake_path = cmake_path[len("//"):]
cmake_path = cmake_path.replace(":", "::") # iree/base::foo or ::foo
cmake_path = cmake_path.replace("/", "::") # iree::base
return cmake_path
def convert_target(target):
"""Converts a Bazel target to a list of CMake targets.
IREE targets are expected to follow a standard form between Bazel and CMake
that facilitates conversion. External targets *may* have their own patterns,
or they may be purely special cases.
  Multiple targets in Bazel may map to a single target in CMake, and a Bazel
target may map to multiple CMake targets.
Returns:
A list of converted targets if it was successfully converted.
Raises:
KeyError: No conversion was found for the target.
"""
if target in EXPLICIT_TARGET_MAPPING:
return EXPLICIT_TARGET_MAPPING[target]
if target.startswith("@llvm-project//llvm"):
return _convert_llvm_target(target)
if target.startswith("@llvm-project//mlir"):
return _convert_mlir_target(target)
if target.startswith("@"):
raise KeyError(f"No conversion found for target '{target}'")
if target.startswith("//llvm-external-projects/iree-dialects"):
return _convert_iree_dialects_target(target)
# IREE root paths map to package names based on explicit rules.
# * src/iree/ directories (compiler/src/iree/ and runtime/src/iree/)
# creating their own root paths by trimming down to just "iree"
# * tools/ uses an empty root, for binary targets names like "iree-compile"
# * other top level directories add back an 'iree' prefix
# If changing these, make the corresponding change in iree_macros.cmake
# (iree_package_ns function).
# Map //compiler/src/iree/(.*) -> iree::\1 (i.e. iree::compiler::\1)
m = re.match("^//compiler/src/iree/(.+)", target)
if m:
return ["iree::" + _convert_to_cmake_path(m.group(1))]
# Map //runtime/src/iree/(.*) -> iree::\1
m = re.match("^//runtime/src/iree/(.+)", target)
if m:
return ["iree::" + _convert_to_cmake_path(m.group(1))]
# Map //tools/(.*) -> \1
m = re.match("^//tools[/|:](.+)", target)
if m:
return [_convert_to_cmake_path(m.group(1))]
# Pass through package-relative targets
# :target_name
# file_name.txt
if target.startswith(":") or ":" not in target:
return [_convert_to_cmake_path(target)]
# Default rewrite: prefix with "iree::", without pruning the path.
return ["iree::" + _convert_to_cmake_path(target)]
| 39.051852 | 79 | 0.669006 |
79466bbf627e71517eba72d7b6d73462bd6d40b0 | 2,574 | py | Python | translate2/numberout.py | sdytkht/se2se | 91a5d69c15746a3f5b620d4dcc4a0b5783737702 | [
"Apache-2.0"
] | null | null | null | translate2/numberout.py | sdytkht/se2se | 91a5d69c15746a3f5b620d4dcc4a0b5783737702 | [
"Apache-2.0"
] | null | null | null | translate2/numberout.py | sdytkht/se2se | 91a5d69c15746a3f5b620d4dcc4a0b5783737702 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 28 00:02:08 2017
@author: kht
"""
import tensorflow as tf
import translate as tl
import numpy as np
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape = shape)
return tf.Variable(initial)
einputs,dinputs,res_logits,all_attens=tl.self_decode()
einputs_t=[]
dinputs_t=[]
res_logits_t=[]
num_exp=len(res_logits)
for i in range(100):
einputs_t.append(einputs[num_exp-i-1])
dinputs_t.append(dinputs[num_exp-i-1])
res_logits_t.append(res_logits[num_exp-i-1])
batch_size=32
maxlen=13
sess = tf.InteractiveSession()
w_fc2 = weight_variable([128, 20])
b_fc2 = bias_variable([20])
x=tf.placeholder(tf.float32,[None,128])
y_=tf.placeholder(tf.float32,[None,20])
y_conv = tf.nn.softmax(tf.matmul(x, w_fc2) + b_fc2)
# train and evaluate the model
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
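# Editorial note (not in the original script): a separate softmax followed by the
# hand-rolled -sum(y_ * log(y_conv)) can hit log(0) when the softmax saturates.
# A numerically safer sketch with the same intent would be:
#   logits = tf.matmul(x, w_fc2) + b_fc2
#   cross_entropy = tf.reduce_mean(
#       tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))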
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
res=tf.argmax(y_conv, 1)
resreal=tf.argmax(y_, 1)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
init=tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
saver = tf.train.Saver()
saver.restore(sess, "train/NumAdd.ckpt")
for i in range(len(res_logits_t)):
din=dinputs_t[i]
dlogit=res_logits_t[i]
'''
for j in range(batch_size):
batch_x=[]
batch_y=np.zeros([13,20],dtype=np.float32)
for k in range(maxlen):
batch_y[k][din[k][j]]=1
dx=dlogit[k][j]
batch_x.append(dx)
print(sess.run(correct_prediction,feed_dict={x: batch_x, y_: batch_y}))
print('-----------------------------------------------------------------------')
print("**************************************************************************************")
'''
for j in range(batch_size):
batch_x=[]
batch_y=np.zeros([13,20],dtype=np.float32)
for k in range(maxlen):
batch_y[k][din[k][j]]=1
dx=dlogit[k][j]
batch_x.append(dx)
print(sess.run(res,feed_dict={x: batch_x, y_: batch_y}))
print(sess.run(resreal,feed_dict={x: batch_x, y_: batch_y}))
print('-----------------------------------------------------------------------')
| 31.390244 | 103 | 0.571484 |
79466c05e9b6ed9b3c8657642cf7bc4f04c93bbf | 5,370 | py | Python | src/attentionwalk.py | erdiolmezogullari/AttentionWalk | d8c8297018374d965c0a024c3f1833f54347504e | [
"MIT"
] | 1 | 2021-03-16T10:22:03.000Z | 2021-03-16T10:22:03.000Z | src/attentionwalk.py | erdiolmezogullari/AttentionWalk | d8c8297018374d965c0a024c3f1833f54347504e | [
"MIT"
] | null | null | null | src/attentionwalk.py | erdiolmezogullari/AttentionWalk | d8c8297018374d965c0a024c3f1833f54347504e | [
"MIT"
] | null | null | null | """AttentionWalk class."""
import torch
import numpy as np
import pandas as pd
from tqdm import trange
from utils import read_graph, feature_calculator, adjacency_opposite_calculator
class AttentionWalkLayer(torch.nn.Module):
"""
Attention Walk Layer.
For details see the paper.
"""
def __init__(self, args, shapes):
"""
Setting up the layer.
:param args: Arguments object.
:param shapes: Shape of the target tensor.
"""
super(AttentionWalkLayer, self).__init__()
self.args = args
self.shapes = shapes
self.define_weights()
self.initialize_weights()
def define_weights(self):
"""
Define the model weights.
"""
half_dim = int(self.args.dimensions/2)
self.left_factors = torch.nn.Parameter(torch.Tensor(self.shapes[1], half_dim))
self.right_factors = torch.nn.Parameter(torch.Tensor(half_dim, self.shapes[1]))
self.attention = torch.nn.Parameter(torch.Tensor(self.shapes[0], 1))
def initialize_weights(self):
"""
Initializing the weights.
"""
torch.nn.init.uniform_(self.left_factors, -0.01, 0.01)
torch.nn.init.uniform_(self.right_factors, -0.01, 0.01)
torch.nn.init.uniform_(self.attention, -0.01, 0.01)
def forward(self, weighted_target_tensor, adjacency_opposite):
"""
Doing a forward propagation pass.
:param weighted_target_tensor: Target tensor factorized.
:param adjacency_opposite: No-edge indicator matrix.
:return loss: Loss being minimized.
"""
self.attention_probs = torch.nn.functional.softmax(self.attention, dim=0)
probs = self.attention_probs.unsqueeze(1).expand_as(weighted_target_tensor)
weighted_target_tensor = weighted_target_tensor * probs
weighted_tar_mat = torch.sum(weighted_target_tensor, dim=0)
weighted_tar_mat = weighted_tar_mat.view(self.shapes[1], self.shapes[2])
estimate = torch.mm(self.left_factors, self.right_factors)
        loss_on_target = -weighted_tar_mat * torch.log(torch.sigmoid(estimate))
loss_opposite = -adjacency_opposite * torch.log(1-torch.sigmoid(estimate))
loss_on_mat = self.args.num_of_walks*weighted_tar_mat.shape[0]*loss_on_target+loss_opposite
abs_loss_on_mat = torch.abs(loss_on_mat)
average_loss_on_mat = torch.mean(abs_loss_on_mat)
norms = torch.mean(torch.abs(self.left_factors))+torch.mean(torch.abs(self.right_factors))
loss_on_regularization = self.args.beta * (self.attention.norm(2)**2)
loss = average_loss_on_mat + loss_on_regularization + self.args.gamma*norms
return loss
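    # Editorial note (not in the original source): the loss above is a negative
    # log-likelihood for the attention-weighted factorization. Observed transitions
    # push sigmoid(left_factors @ right_factors) toward 1, non-edges push it toward 0,
    # with an L2 penalty on the attention vector and a mean-absolute penalty on the factors.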
class AttentionWalkTrainer(object):
"""
Class for training the AttentionWalk model.
"""
def __init__(self, args):
"""
Initializing the training object.
:param args: Arguments object.
"""
self.args = args
self.graph = read_graph(self.args.edge_path)
self.initialize_model_and_features()
def initialize_model_and_features(self):
"""
        Creating data tensors and factorization model.
"""
self.target_tensor = feature_calculator(self.args, self.graph)
self.target_tensor = torch.FloatTensor(self.target_tensor)
self.adjacency_opposite = adjacency_opposite_calculator(self.graph)
self.adjacency_opposite = torch.FloatTensor(self.adjacency_opposite)
self.model = AttentionWalkLayer(self.args, self.target_tensor.shape)
def fit(self):
"""
Fitting the model
"""
print("\nTraining the model.\n")
self.model.train()
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
self.epochs = trange(self.args.epochs, desc="Loss")
for _ in self.epochs:
self.optimizer.zero_grad()
loss = self.model(self.target_tensor, self.adjacency_opposite)
loss.backward()
self.optimizer.step()
self.epochs.set_description("Attention Walk (Loss=%g)" % round(loss.item(), 4))
def save_model(self):
"""
Saving the embedding and attention vector.
"""
self.save_embedding()
self.save_attention()
def save_embedding(self):
"""
Saving the embedding matrices as one unified embedding.
"""
print("\nSaving the model.\n")
left = self.model.left_factors.detach().numpy()
right = self.model.right_factors.detach().numpy().T
indices = np.array([range(len(self.graph))]).reshape(-1, 1)
embedding = np.concatenate([indices, left, right], axis=1)
columns = ["id"] + ["x_" + str(x) for x in range(self.args.dimensions)]
embedding = pd.DataFrame(embedding, columns=columns)
embedding.to_csv(self.args.embedding_path, index=None)
def save_attention(self):
"""
Saving the attention vector.
"""
attention = self.model.attention_probs.detach().numpy()
indices = np.array([range(self.args.window_size)]).reshape(-1, 1)
attention = np.concatenate([indices, attention], axis=1)
attention = pd.DataFrame(attention, columns=["Order", "Weight"])
attention.to_csv(self.args.attention_path, index=None)
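# Editorial usage sketch (not part of the original file):
#   trainer = AttentionWalkTrainer(args)
#   trainer.fit()
#   trainer.save_model()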
| 40.37594 | 99 | 0.654376 |
79466cd9ff0a80345912027feabb7685aaa75f85 | 3,985 | py | Python | homeassistant/components/homekit/const.py | henrikstumpf/home-assistant | e00e65a8938214b16a583cd44601a9729cad2873 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/homekit/const.py | henrikstumpf/home-assistant | e00e65a8938214b16a583cd44601a9729cad2873 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/homekit/const.py | henrikstumpf/home-assistant | e00e65a8938214b16a583cd44601a9729cad2873 | [
"Apache-2.0"
] | null | null | null | """Constants used be the HomeKit component."""
# #### MISC ####
DEBOUNCE_TIMEOUT = 0.5
DOMAIN = 'homekit'
HOMEKIT_FILE = '.homekit.state'
HOMEKIT_NOTIFY_ID = 4663548
# #### CONFIG ####
CONF_AUTO_START = 'auto_start'
CONF_ENTITY_CONFIG = 'entity_config'
CONF_FILTER = 'filter'
# #### CONFIG DEFAULTS ####
DEFAULT_AUTO_START = True
DEFAULT_PORT = 51827
# #### HOMEKIT COMPONENT SERVICES ####
SERVICE_HOMEKIT_START = 'start'
# #### STRING CONSTANTS ####
BRIDGE_MODEL = 'homekit.bridge'
BRIDGE_NAME = 'Home Assistant'
MANUFACTURER = 'HomeAssistant'
# #### Categories ####
CATEGORY_ALARM_SYSTEM = 'ALARM_SYSTEM'
CATEGORY_GARAGE_DOOR_OPENER = 'GARAGE_DOOR_OPENER'
CATEGORY_LIGHT = 'LIGHTBULB'
CATEGORY_LOCK = 'DOOR_LOCK'
CATEGORY_SENSOR = 'SENSOR'
CATEGORY_SWITCH = 'SWITCH'
CATEGORY_THERMOSTAT = 'THERMOSTAT'
CATEGORY_WINDOW_COVERING = 'WINDOW_COVERING'
# #### Services ####
SERV_ACCESSORY_INFO = 'AccessoryInformation'
SERV_AIR_QUALITY_SENSOR = 'AirQualitySensor'
SERV_CARBON_DIOXIDE_SENSOR = 'CarbonDioxideSensor'
SERV_CARBON_MONOXIDE_SENSOR = 'CarbonMonoxideSensor'
SERV_CONTACT_SENSOR = 'ContactSensor'
SERV_GARAGE_DOOR_OPENER = 'GarageDoorOpener'
SERV_HUMIDITY_SENSOR = 'HumiditySensor' # CurrentRelativeHumidity
SERV_LEAK_SENSOR = 'LeakSensor'
SERV_LIGHT_SENSOR = 'LightSensor'
SERV_LIGHTBULB = 'Lightbulb' # On | Brightness, Hue, Saturation, Name
SERV_LOCK = 'LockMechanism'
SERV_MOTION_SENSOR = 'MotionSensor'
SERV_OCCUPANCY_SENSOR = 'OccupancySensor'
SERV_SECURITY_SYSTEM = 'SecuritySystem'
SERV_SMOKE_SENSOR = 'SmokeSensor'
SERV_SWITCH = 'Switch'
SERV_TEMPERATURE_SENSOR = 'TemperatureSensor'
SERV_THERMOSTAT = 'Thermostat'
SERV_WINDOW_COVERING = 'WindowCovering' # CurrentPosition, TargetPosition
# #### Characteristics ####
CHAR_AIR_PARTICULATE_DENSITY = 'AirParticulateDensity'
CHAR_AIR_QUALITY = 'AirQuality'
CHAR_BRIGHTNESS = 'Brightness' # Int | [0, 100]
CHAR_CARBON_DIOXIDE_DETECTED = 'CarbonDioxideDetected'
CHAR_CARBON_DIOXIDE_LEVEL = 'CarbonDioxideLevel'
CHAR_CARBON_DIOXIDE_PEAK_LEVEL = 'CarbonDioxidePeakLevel'
CHAR_CARBON_MONOXIDE_DETECTED = 'CarbonMonoxideDetected'
CHAR_COLOR_TEMPERATURE = 'ColorTemperature'
CHAR_CONTACT_SENSOR_STATE = 'ContactSensorState'
CHAR_COOLING_THRESHOLD_TEMPERATURE = 'CoolingThresholdTemperature'
CHAR_CURRENT_AMBIENT_LIGHT_LEVEL = 'CurrentAmbientLightLevel'
CHAR_CURRENT_DOOR_STATE = 'CurrentDoorState'
CHAR_CURRENT_HEATING_COOLING = 'CurrentHeatingCoolingState'
CHAR_CURRENT_POSITION = 'CurrentPosition' # Int | [0, 100]
CHAR_CURRENT_HUMIDITY = 'CurrentRelativeHumidity' # percent
CHAR_CURRENT_SECURITY_STATE = 'SecuritySystemCurrentState'
CHAR_CURRENT_TEMPERATURE = 'CurrentTemperature'
CHAR_HEATING_THRESHOLD_TEMPERATURE = 'HeatingThresholdTemperature'
CHAR_HUE = 'Hue' # arcdegress | [0, 360]
CHAR_LEAK_DETECTED = 'LeakDetected'
CHAR_LOCK_CURRENT_STATE = 'LockCurrentState'
CHAR_LOCK_TARGET_STATE = 'LockTargetState'
CHAR_LINK_QUALITY = 'LinkQuality'
CHAR_MANUFACTURER = 'Manufacturer'
CHAR_MODEL = 'Model'
CHAR_MOTION_DETECTED = 'MotionDetected'
CHAR_NAME = 'Name'
CHAR_OCCUPANCY_DETECTED = 'OccupancyDetected'
CHAR_ON = 'On' # boolean
CHAR_SATURATION = 'Saturation' # percent
CHAR_SERIAL_NUMBER = 'SerialNumber'
CHAR_SMOKE_DETECTED = 'SmokeDetected'
CHAR_TARGET_DOOR_STATE = 'TargetDoorState'
CHAR_TARGET_HEATING_COOLING = 'TargetHeatingCoolingState'
CHAR_TARGET_POSITION = 'TargetPosition' # Int | [0, 100]
CHAR_TARGET_SECURITY_STATE = 'SecuritySystemTargetState'
CHAR_TARGET_TEMPERATURE = 'TargetTemperature'
CHAR_TEMP_DISPLAY_UNITS = 'TemperatureDisplayUnits'
# #### Properties ####
PROP_CELSIUS = {'minValue': -273, 'maxValue': 999}
# #### Device Class ####
DEVICE_CLASS_CO2 = 'co2'
DEVICE_CLASS_GAS = 'gas'
DEVICE_CLASS_HUMIDITY = 'humidity'
DEVICE_CLASS_LIGHT = 'light'
DEVICE_CLASS_MOISTURE = 'moisture'
DEVICE_CLASS_MOTION = 'motion'
DEVICE_CLASS_OCCUPANCY = 'occupancy'
DEVICE_CLASS_OPENING = 'opening'
DEVICE_CLASS_PM25 = 'pm25'
DEVICE_CLASS_SMOKE = 'smoke'
DEVICE_CLASS_TEMPERATURE = 'temperature'
| 35.265487 | 74 | 0.80276 |
79466e23aeafa959d32511e8edb023f437d93028 | 277 | py | Python | Arrays/main_3.py | RobertElias/PythonProjects | 9dcf24bdd6b31ad94cfab6cf81caf7fdc0f8023a | [
"MIT"
] | null | null | null | Arrays/main_3.py | RobertElias/PythonProjects | 9dcf24bdd6b31ad94cfab6cf81caf7fdc0f8023a | [
"MIT"
] | null | null | null | Arrays/main_3.py | RobertElias/PythonProjects | 9dcf24bdd6b31ad94cfab6cf81caf7fdc0f8023a | [
"MIT"
] | null | null | null | #3. Write a Python program to append a new item to the end of the array.
from array import *
array_num = array('i', [1, 3, 5, 7, 9])
print("Original array: "+str(array_num))
print("Append 11 at the end of the array:")
array_num.append(11)
print("New array: "+str(array_num))
| 27.7 | 72 | 0.693141 |
79466e6c9cfe618bc3ad8c5db7d3bfc53e668c1a | 56 | py | Python | noo/impl/packager/__init__.py | nooproject/noo | 238711c55faeb1226a4e5339cd587a312c4babac | [
"MIT"
] | 2 | 2022-02-03T07:35:46.000Z | 2022-02-03T16:12:25.000Z | noo/impl/packager/__init__.py | nooproject/noo | 238711c55faeb1226a4e5339cd587a312c4babac | [
"MIT"
] | 2 | 2022-03-05T02:31:38.000Z | 2022-03-05T21:26:42.000Z | noo/impl/packager/__init__.py | nooproject/noo | 238711c55faeb1226a4e5339cd587a312c4babac | [
"MIT"
] | 1 | 2022-03-05T01:40:29.000Z | 2022-03-05T01:40:29.000Z | from .packager import Packager
__all__ = ("Packager",)
| 14 | 30 | 0.732143 |
7946707c223dc1fbfd89022bf077131c28a0dbf0 | 1,571 | py | Python | game/my.py | senhordaluz/jc1-python | 7ed14199147dcc74c970740b670a2da22d91e550 | [
"MIT"
] | null | null | null | game/my.py | senhordaluz/jc1-python | 7ed14199147dcc74c970740b670a2da22d91e550 | [
"MIT"
] | null | null | null | game/my.py | senhordaluz/jc1-python | 7ed14199147dcc74c970740b670a2da22d91e550 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 20 21:07:10 2017
@author: Pedro da Luz
"""
import pygame
pygame.mixer.pre_init(44100,-16,2, 1024)
pygame.init()
GAME_NAME = "SUPER SMASH ARANHA-MORCEGO"
SIZE = SCREEN_WIDTH, SCREEN_HEIGHT = [800,600]
FPS = 30
FPSCLOCK = pygame.time.Clock()
muted = False
# Colours R G B ALPHA
WHITE = (255, 255, 255, 255)
BLACK = ( 0, 0, 0, 255)
RED = (230, 70, 70, 255)
BRIGHTRED = (255, 0, 0, 255)
DARKRED = (220, 0, 0, 255)
BLUE = ( 0, 0, 255, 255)
SKYBLUE = (135, 206, 250, 255)
YELLOW = (255, 250, 17, 255)
GREEN = (110, 255, 100, 255)
ORANGE = (255, 165, 0, 255)
DARKGREEN = ( 60, 160, 60, 255)
DARKGREY = ( 60, 60, 60, 255)
LIGHTGREY = (180, 180, 180, 255)
BROWN = (139, 69, 19, 255)
DARKBROWN = (100, 30, 0, 255)
BROWNBLACK= ( 50, 0, 0, 255)
GREYBROWN = (160, 110, 90, 255)
CREAM = (255, 255, 204, 255)
COLOURKEY = ( 1, 2, 3, 255)
BLUETRANS = ( 0, 0, 255, 100)
# Artes (art asset paths)
ARTE_MAPAS_PATH = 'data/arte/fundo/'
ARTE_PORTAL_PATH = 'data/arte/portal/'
ARTE_MENU_PATH = 'data/arte/menu/'
ARTE_ARAQUESL_PATH = 'data/arte/ataques/'
ARTE_ICONE_PATH = 'data/arte/icone/'
ARTE_JOGADOR1_PATH = 'data/arte/personagens/jogador1/'
ARTE_JOGADOR2_PATH = 'data/arte/personagens/jogador2/'
ARTE_BOSS01_PATH = 'data/arte/personagens/boss01/'
ARTE_VILAO01_PATH = 'data/arte/personagens/vilao01/'
ARTE_VILAO02_PATH = 'data/arte/personagens/vilao03/'
ARTE_VILAO03_PATH = 'data/arte/personagens/vilao03/'
ARTE_VILAO04_PATH = 'data/arte/personagens/vilao04/' | 29.092593 | 54 | 0.642266 |
79467145daba1d832ff132b59b7a6cb22dc1057d | 1,829 | py | Python | giantstar/utils/fileReader.py | DaoSen-v/gaintstar | 5ecae8652c9f71aaa78cc0fd181d431cb130cab1 | [
"MIT"
] | 1 | 2021-08-06T08:32:15.000Z | 2021-08-06T08:32:15.000Z | giantstar/utils/fileReader.py | DaoSen-v/gaintstar | 5ecae8652c9f71aaa78cc0fd181d431cb130cab1 | [
"MIT"
] | null | null | null | giantstar/utils/fileReader.py | DaoSen-v/gaintstar | 5ecae8652c9f71aaa78cc0fd181d431cb130cab1 | [
"MIT"
] | null | null | null | # _*_encoding=utf8_*_
# @Time : 2021/6/10 16:07
# @Author : xuyong
# @Email: [email protected]
import json
from typing import List
import pymysql
import pandas
from giantstar.globalSetting import plus_setting
from giantstar.utils.error import DatabaseNoSuchData, DataIndexError
class ExcelReader:
files = plus_setting.DATA_FILES
@classmethod
def get_excel_data(cls, index: str, use:str ='default', index_col="dataid"):
sheet, _id = cls.refresh_index(index)
df = pandas.read_excel(cls.files.get(use), sheet_name=sheet, index_col=index_col, keep_default_na=False)
data = df.loc[_id].to_dict()
if isinstance(data["host"], dict):
raise DataIndexError(f"【数据索引】data index error,index must be unique. but get many index result")
for k, v in data.items():
if k in ["headers", "request", "assert", "extract"]:
data[k] = json.loads(v) if v else {}
return data
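    # Editorial usage sketch (hypothetical sheet/id names, not from the original module):
    #   row = ExcelReader.get_excel_data("login.case01")
    # would read sheet "login" from the configured workbook and return the row whose
    # "dataid" index equals "case01", with JSON-like columns parsed into dicts.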
@classmethod
def refresh_index(cls, index):
index_list:List = index.split('.')
if len(index_list) == 2:
return index_list
raise DataIndexError(f"【数据索引】data index error,expect get 'a.b', but get '{index}'.")
class MysqlReader:
database_config = plus_setting.DATABASE
@classmethod
def get_mysql_data(cls,sql, use='default'):
"""
        Fetch a single row from the database.
        :param use: name of the database configuration to use
        :param sql: SQL statement to execute
:return: dict
"""
connect = pymysql.connect(**cls.database_config.get(use))
cursor = connect.cursor(cursor=pymysql.cursors.DictCursor)
cursor.execute(sql)
res = cursor.fetchone()
cursor.close()
connect.close()
if res: return res
raise DatabaseNoSuchData(f"【数据查询错误】数据库不存在该条数据, sql={sql}")
class YamlReader:
pass
| 29.5 | 112 | 0.642428 |
794672c51fdb6f056ee83a094f02ea0d8a1c79da | 2,128 | py | Python | networks/custom_net.py | catch-n-release/drig | 1290e60839e6b45ce2c429d2ed9eb4dcf0678755 | [
"BSD-3-Clause"
] | null | null | null | networks/custom_net.py | catch-n-release/drig | 1290e60839e6b45ce2c429d2ed9eb4dcf0678755 | [
"BSD-3-Clause"
] | null | null | null | networks/custom_net.py | catch-n-release/drig | 1290e60839e6b45ce2c429d2ed9eb4dcf0678755 | [
"BSD-3-Clause"
] | null | null | null | from keras.models import Input, Model
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.core import Activation, Dense, Flatten, Dropout
from keras.layers.normalization import BatchNormalization
from keras import backend
from drig.config import Kernel, PoolSize, Padding, Trigger
class CustomNet:
@staticmethod
def compose(
height: int,
width: int,
depth: int,
classes: int = None,
filters: list = (16, 32, 64),
regressor: bool = False,
):
try:
input_cast = (
height,
width,
depth,
)
channel_index = -1
if backend.image_data_format() == "channels_first":
input_cast = (
depth,
height,
width,
)
channel_index = 1
influx = Input(shape=input_cast)
tensor = influx
for filter_size in filters:
tensor = Conv2D(
filter_size,
Kernel.MESH_3x3,
padding=Padding.SAME,
)(tensor)
tensor = Activation(Trigger.RELU)(tensor)
tensor = BatchNormalization(axis=channel_index)(tensor)
tensor = MaxPooling2D(pool_size=PoolSize.MESH_2x2)(tensor)
##################
tensor = Flatten()(tensor)
tensor = Dense(16)(tensor)
tensor = Activation(Trigger.RELU)(tensor)
tensor = BatchNormalization(axis=channel_index)(tensor)
tensor = Dropout(0.5)(tensor)
#################
tensor = Dense(4)(tensor)
tensor = Activation(Trigger.RELU)(tensor)
###############
if regressor:
tensor = Dense(1)(tensor)
tensor = Activation(Trigger.LINEAR)(tensor)
###############
net = Model(influx, tensor)
return net
except Exception as e:
raise e
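# Editorial usage sketch (not part of the original module):
#   model = CustomNet.compose(height=64, width=64, depth=3, regressor=True)
# builds one Conv2D/ReLU/BatchNorm/MaxPool block per entry in `filters`, a small
# dense head, and a single linear output unit when regressor=True.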
| 29.971831 | 74 | 0.493421 |
794672ea296bb020aa0144860b234562f2fc1187 | 426 | py | Python | dashboard_app/tests/mixins.py | bitlabstudio/django-dashboard-app | ed98f2bca91a4ced36d0dd1aa1baee78e989cf64 | [
"MIT"
] | 10 | 2017-03-21T01:31:37.000Z | 2021-07-14T02:54:50.000Z | dashboard_app/tests/mixins.py | bitlabstudio/django-dashboard-app | ed98f2bca91a4ced36d0dd1aa1baee78e989cf64 | [
"MIT"
] | 2 | 2015-04-14T09:15:09.000Z | 2015-04-18T07:52:40.000Z | dashboard_app/tests/mixins.py | bitmazk/django-dashboard-app | ed98f2bca91a4ced36d0dd1aa1baee78e989cf64 | [
"MIT"
] | 5 | 2015-04-14T09:22:29.000Z | 2016-10-11T08:21:21.000Z | """Mixins for the tests of the dashboard_app."""
from ..widget_pool import dashboard_widget_pool
class WidgetTestCaseMixin(object):
"""
Mixin that makes sure to unregister widgets leftover from other tests.
"""
def _unregister_widgets(self):
# unregister all widgets that might be leftover from other tests
dashboard_widget_pool.widgets = {}
dashboard_widget_pool.discovered = False
| 30.428571 | 74 | 0.725352 |
794673b6ef346afe8c7e6fa0eede154c467613d5 | 973 | py | Python | mayo/override/quantize/base.py | deep-fry/mayo | 7211a11fdb9bb0a036d496a3eba16c96db122f89 | [
"MIT"
] | 110 | 2018-06-07T17:52:29.000Z | 2022-03-28T08:04:02.000Z | mayo/override/quantize/base.py | kypomon/mayo | 7211a11fdb9bb0a036d496a3eba16c96db122f89 | [
"MIT"
] | 6 | 2019-10-17T12:00:29.000Z | 2021-10-21T13:41:22.000Z | mayo/override/quantize/base.py | kypomon/mayo | 7211a11fdb9bb0a036d496a3eba16c96db122f89 | [
"MIT"
] | 22 | 2018-07-05T01:30:49.000Z | 2021-10-19T06:15:40.000Z | from mayo.override import util
from mayo.override.base import OverriderBase
class QuantizerBase(OverriderBase):
@property
def real_width(self):
raise NotImplementedError(
'Override this method to compute real bit-width '
'required for {!r}.'.format(self))
def eval(self, attribute):
if util.is_tensor(attribute):
return self.session.run(attribute)
return attribute
def _quantize(self, value, **kwargs):
raise NotImplementedError(
'Override this method to perform quantization.')
@staticmethod
def _overflow_rate(mask):
"""
Compute overflow_rate from a given overflow mask. Here `mask` is a
boolean tensor where True and False represent the presence and absence
        of overflow respectively.
"""
return util.sum(util.cast(mask, int)) / util.count(mask)
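    # Editorial example (not in the original source): a mask of [True, False, False, True]
    # yields an overflow rate of 2 / 4 = 0.5.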
def _apply(self, value):
return self._quantize(value)
| 30.40625 | 78 | 0.652621 |
794673cc4300c071fbbd29a81a8c49c86f2ffa81 | 4,191 | py | Python | nova/objects/instance_fault.py | rossella/nova | 9b1180d5a09227604a470cb9d0790b57daf1b6d7 | [
"Apache-2.0"
] | null | null | null | nova/objects/instance_fault.py | rossella/nova | 9b1180d5a09227604a470cb9d0790b57daf1b6d7 | [
"Apache-2.0"
] | null | null | null | nova/objects/instance_fault.py | rossella/nova | 9b1180d5a09227604a470cb9d0790b57daf1b6d7 | [
"Apache-2.0"
] | 1 | 2020-07-24T06:47:54.000Z | 2020-07-24T06:47:54.000Z | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova import db
from nova import exception
from nova.objects import base
from nova.objects import fields
from nova.openstack.common.gettextutils import _LE
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InstanceFault(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
# Version 1.2: Added create()
VERSION = '1.2'
fields = {
'id': fields.IntegerField(),
'instance_uuid': fields.UUIDField(),
'code': fields.IntegerField(),
'message': fields.StringField(nullable=True),
'details': fields.StringField(nullable=True),
'host': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(context, fault, db_fault):
# NOTE(danms): These are identical right now
for key in fault.fields:
fault[key] = db_fault[key]
fault._context = context
fault.obj_reset_changes()
return fault
@base.remotable_classmethod
def get_latest_for_instance(cls, context, instance_uuid):
db_faults = db.instance_fault_get_by_instance_uuids(context,
[instance_uuid])
if instance_uuid in db_faults and db_faults[instance_uuid]:
return cls._from_db_object(context, cls(),
db_faults[instance_uuid][0])
@base.remotable
def create(self, context):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
values = {
'instance_uuid': self.instance_uuid,
'code': self.code,
'message': self.message,
'details': self.details,
'host': self.host,
}
db_fault = db.instance_fault_create(context, values)
self._from_db_object(context, self, db_fault)
self.obj_reset_changes()
# Cells should only try sending a message over to nova-cells
# if cells is enabled and we're not the API cell. Otherwise,
# if the API cell is calling this, we could end up with
# infinite recursion.
if cells_opts.get_cell_type() == 'compute':
try:
cells_rpcapi.CellsAPI().instance_fault_create_at_top(
context, db_fault)
except Exception:
LOG.exception(_LE("Failed to notify cells of instance fault"))
class InstanceFaultList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# InstanceFault <= version 1.1
# Version 1.1: InstanceFault version 1.2
VERSION = '1.1'
fields = {
'objects': fields.ListOfObjectsField('InstanceFault'),
}
child_versions = {
'1.0': '1.1',
# NOTE(danms): InstanceFault was at 1.1 before we added this
'1.1': '1.2',
}
@base.remotable_classmethod
def get_by_instance_uuids(cls, context, instance_uuids):
db_faultdict = db.instance_fault_get_by_instance_uuids(context,
instance_uuids)
db_faultlist = itertools.chain(*db_faultdict.values())
return base.obj_make_list(context, InstanceFaultList(), InstanceFault,
db_faultlist)
| 37.756757 | 78 | 0.629205 |
79467489fd6ab097d14550f7af3746da3c6065ce | 156 | py | Python | tfc_web/bikes/urls.py | SmartCambridge/tfc_web | ac16b3c2aa5200320e9ffa9d270fb409b98ed55d | [
"MIT"
] | 2 | 2018-10-28T20:15:23.000Z | 2019-03-29T09:06:09.000Z | tfc_web/bikes/urls.py | SmartCambridge/tfc_web | ac16b3c2aa5200320e9ffa9d270fb409b98ed55d | [
"MIT"
] | 107 | 2018-10-22T06:57:07.000Z | 2020-09-15T14:43:03.000Z | tfc_web/bikes/urls.py | SmartCambridge/tfc_web | ac16b3c2aa5200320e9ffa9d270fb409b98ed55d | [
"MIT"
] | 1 | 2020-03-20T19:49:29.000Z | 2020-03-20T19:49:29.000Z | from django.conf.urls import url
from bikes.views import current_bikes
urlpatterns = [
url(r'^current-bikes', current_bikes, name='current-bikes'),
]
| 19.5 | 64 | 0.74359 |
7946770183dfa7d7ecb250c446eb17e463d5921e | 5,584 | py | Python | tests/gmprocess/metrics/imc/radial_transverse_test.py | baagaard-usgs/groundmotion-processing | 6be2b4460d598bba0935135efa85af2655578565 | [
"Unlicense"
] | 54 | 2019-01-12T02:05:38.000Z | 2022-03-29T19:43:56.000Z | tests/gmprocess/metrics/imc/radial_transverse_test.py | baagaard-usgs/groundmotion-processing | 6be2b4460d598bba0935135efa85af2655578565 | [
"Unlicense"
] | 700 | 2018-12-18T19:44:31.000Z | 2022-03-30T20:54:28.000Z | tests/gmprocess/metrics/imc/radial_transverse_test.py | baagaard-usgs/groundmotion-processing | 6be2b4460d598bba0935135efa85af2655578565 | [
"Unlicense"
] | 41 | 2018-11-29T23:17:56.000Z | 2022-03-31T04:04:23.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# stdlib imports
import os
# third party imports
import numpy as np
import scipy.constants as sp
from obspy import read, read_inventory
from obspy.geodetics import gps2dist_azimuth
from obspy.core.event import Origin
import pkg_resources
# local imports
from gmprocess.metrics.station_summary import StationSummary
from gmprocess.metrics.exception import PGMException
from gmprocess.core.stationstream import StationStream
from gmprocess.core.stationtrace import StationTrace
ddir = os.path.join("data", "testdata", "fdsnfetch")
datadir = pkg_resources.resource_filename("gmprocess", ddir)
def test_radial_transverse():
origin = Origin(latitude=47.149, longitude=-122.7266667)
st = read(os.path.join(datadir, "resp_cor", "UW.ALCT.--.*.MSEED"))
st[0].stats.standard = {}
st[0].stats.standard["horizontal_orientation"] = 0.0
st[0].stats["channel"] = "HN1"
st[1].stats.standard = {}
st[1].stats.standard["horizontal_orientation"] = 90.0
st[1].stats["channel"] = "HN2"
st[2].stats.standard = {}
st[2].stats.standard["horizontal_orientation"] = np.nan
st[2].stats["channel"] = "HNZ"
inv = read_inventory(os.path.join(datadir, "inventory.xml"))
stalat, stalon = inv[0][0][0].latitude, inv[0][0][0].longitude
for i, tr in enumerate(st):
tr.stats["coordinates"] = {"latitude": stalat}
tr.stats["coordinates"]["longitude"] = stalon
tr.stats["standard"].update(
{
"corner_frequency": np.nan,
"station_name": "",
"source": "json",
"instrument": "",
"instrument_period": np.nan,
"vertical_orientation": np.nan,
"source_format": "json",
"comments": "",
"structure_type": "",
"source_file": "",
"sensor_serial_number": "",
"process_level": "raw counts",
"process_time": "",
"units": "acc",
"units_type": "acc",
"instrument_sensitivity": np.nan,
"instrument_damping": np.nan,
}
)
baz = gps2dist_azimuth(stalat, stalon, origin.latitude, origin.longitude)[1]
st1 = st.copy()
st1[0].stats.channel = st1[0].stats.channel[:-1] + "N"
st1[1].stats.channel = st1[1].stats.channel[:-1] + "E"
st1.rotate(method="NE->RT", back_azimuth=baz)
pgms = np.abs(st1.max())
st2 = StationStream([])
for t in st:
st2.append(StationTrace(t.data, t.stats))
for tr in st2:
response = {"input_units": "counts", "output_units": "cm/s^2"}
tr.setProvenance("remove_response", response)
summary = StationSummary.from_stream(st2, ["radial_transverse"], ["pga"], origin)
pgmdf = summary.pgms
R = pgmdf.loc["PGA", "HNR"].Result
T = pgmdf.loc["PGA", "HNT"].Result
np.testing.assert_almost_equal(pgms[0], sp.g * R)
np.testing.assert_almost_equal(pgms[1], sp.g * T)
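    # Editorial note (not in the original test): the PGA from the manually rotated
    # stream (st1) and the 'radial_transverse' metric from StationSummary should agree
    # once the summary values are rescaled by sp.g.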
# Test with a station whose channels are not aligned to E-N
SEW_st = read(os.path.join(datadir, "resp_cor", "GS.SEW.*.mseed"))
SEW_inv = read_inventory(os.path.join(datadir, "inventory_sew.xml"))
stalat, stalon = inv[0][0][0].latitude, inv[0][0][0].longitude
# This needs to be checked. The target data doesn't appear to be
# correct. This can be updated when a tolerance is added to the rotate
# method.
"""traces = []
for tr in SEW_st:
tr.stats.coordinates = {'latitude': stalat,
'longitude': stalon}
tr.stats.standard = {'corner_frequency': np.nan,
'station_name': '',
'source': 'json',
'instrument': '',
'instrument_period': np.nan,
'source_format': 'json',
'comments': '',
'structure_type': '',
'sensor_serial_number': '',
'process_level': 'raw counts',
'process_time': '',
'horizontal_orientation':
SEW_inv.get_channel_metadata(tr.get_id())['azimuth'],
'units': 'acc',
'instrument_damping': np.nan}
traces += [StationTrace(tr.data, tr.stats)]
baz = gps2dist_azimuth(stalat, stalon,
origin.latitude, origin.longitude)[1]
SEW_st_copy = StationStream(traces)
SEW_st_copy.rotate(method='->NE', inventory=SEW_inv)
SEW_st_copy.rotate(method='NE->RT', back_azimuth=baz)
pgms = np.abs(SEW_st_copy.max())
summary = StationSummary.from_stream(
SEW_st, ['radial_transverse'], ['pga'], origin)
np.testing.assert_almost_equal(
pgms[1], sp.g * summary.pgms['PGA']['R'])
np.testing.assert_almost_equal(
pgms[2], sp.g * summary.pgms['PGA']['T'])"""
# Test failure case without two horizontal channels
copy1 = st2.copy()
copy1[0].stats.channel = copy1[0].stats.channel[:-1] + "3"
pgms = StationSummary.from_stream(
copy1, ["radial_transverse"], ["pga"], origin
).pgms
assert np.isnan(pgms.loc["PGA", "HNR"].Result)
assert np.isnan(pgms.loc["PGA", "HNT"].Result)
# Test failure case when channels are not orthogonal
copy3 = st2.copy()
copy3[0].stats.standard.horizontal_orientation = 100
pgms = StationSummary.from_stream(
copy3, ["radial_transverse"], ["pga"], origin
).pgms
assert np.isnan(pgms.loc["PGA", "HNR"].Result)
assert np.isnan(pgms.loc["PGA", "HNT"].Result)
if __name__ == "__main__":
test_radial_transverse()
| 35.566879 | 85 | 0.60351 |
79467844b687f0d95b51c377133cfb9372a99e3a | 2,831 | py | Python | wikipedia-data-examples/win_unicode_console/readline_hook.py | jtmorgan/ds4ux | 14c4ece59b367fe7c8db09a126161693b9a640b3 | [
"MIT"
] | null | null | null | wikipedia-data-examples/win_unicode_console/readline_hook.py | jtmorgan/ds4ux | 14c4ece59b367fe7c8db09a126161693b9a640b3 | [
"MIT"
] | null | null | null | wikipedia-data-examples/win_unicode_console/readline_hook.py | jtmorgan/ds4ux | 14c4ece59b367fe7c8db09a126161693b9a640b3 | [
"MIT"
] | null | null | null |
import sys, traceback
from ctypes import pythonapi, cdll, c_size_t, c_char_p, c_void_p, cast, CFUNCTYPE, POINTER, addressof
PyMem_Malloc = pythonapi.PyMem_Malloc
PyMem_Malloc.restype = c_size_t
PyMem_Malloc.argtypes = [c_size_t]
strncpy = cdll.msvcrt.strncpy
strncpy.restype = c_char_p
strncpy.argtypes = [c_char_p, c_char_p, c_size_t]
HOOKFUNC = CFUNCTYPE(c_char_p, c_void_p, c_void_p, c_char_p)
PyOS_ReadlineFunctionPointer = c_void_p.in_dll(pythonapi, "PyOS_ReadlineFunctionPointer")
def new_zero_terminated_string(b):
p = PyMem_Malloc(len(b) + 1)
strncpy(cast(p, c_char_p), b, len(b) + 1)
return p
class ReadlineHookManager:
def __init__(self):
self.readline_wrapper_ref = HOOKFUNC(self.readline_wrapper)
self.address = c_void_p.from_address(addressof(self.readline_wrapper_ref)).value
self.original_address = PyOS_ReadlineFunctionPointer.value
self.readline_hook = None
def readline_wrapper(self, stdin, stdout, prompt):
try:
try:
if sys.stdin.encoding != sys.stdout.encoding:
raise ValueError("sys.stdin.encoding != sys.stdout.encoding, readline hook doesn't know, which one to use to decode prompt")
except ValueError:
traceback.print_exc(file=sys.stderr)
try:
prompt = prompt.decode("utf-8")
except UnicodeDecodeError:
prompt = ""
else:
prompt = prompt.decode(sys.stdout.encoding)
try:
line = self.readline_hook(prompt)
except KeyboardInterrupt:
return 0
else:
return new_zero_terminated_string(line.encode(sys.stdin.encoding))
except:
print("Intenal win_unicode_console error", file=sys.stderr)
traceback.print_exc(file=sys.stderr)
return new_zero_terminated_string(b"\n")
def install_hook(self, hook):
self.readline_hook = hook
PyOS_ReadlineFunctionPointer.value = self.address
def restore_original(self):
self.readline_hook = None
PyOS_ReadlineFunctionPointer.value = self.original_address
def readline(prompt):
sys.stdout.write(prompt)
sys.stdout.flush()
return sys.stdin.readline()
class PyReadlineManager:
def __init__(self):
self.original_codepage = pyreadline.unicode_helper.pyreadline_codepage
def set_codepage(self, codepage):
pyreadline.unicode_helper.pyreadline_codepage = codepage
def restore_original(self):
self.set_codepage(self.original_codepage)
try:
import pyreadline.unicode_helper
except ImportError:
pyreadline = None
else:
pyreadline_manager = PyReadlineManager()
manager = ReadlineHookManager()
def enable(*, use_pyreadline=True):
if use_pyreadline and pyreadline:
pyreadline_manager.set_codepage(sys.stdin.encoding)
# pyreadline assumes that encoding of all sys.stdio objects is the same
else:
manager.install_hook(readline)
def disable():
if pyreadline:
pyreadline_manager.restore_original()
manager.restore_original()
| 26.457944 | 129 | 0.767573 |
79467853dcd6f36ad5d8951182550364b5da7bbf | 18,776 | py | Python | ravop/core/__init__.py | 7enTropy7/ravop | e8741adac086ed2727cb9e9fb6fec15dae297573 | [
"MIT"
] | null | null | null | ravop/core/__init__.py | 7enTropy7/ravop | e8741adac086ed2727cb9e9fb6fec15dae297573 | [
"MIT"
] | null | null | null | ravop/core/__init__.py | 7enTropy7/ravop | e8741adac086ed2727cb9e9fb6fec15dae297573 | [
"MIT"
] | null | null | null | import ast
import imp
import json
import sys
import os
import time
from functools import wraps
import numpy as np
import speedtest
from ..globals import globals as g
from ..config import FTP_SERVER_URL
from ..strings import OpTypes, NodeTypes, functions, OpStatus
from ..utils import make_request, convert_to_ndarray, dump_data
from .ftp_client import get_client, check_credentials
ftp_client = None
ftp_username = None
ftp_password = None
def initialize(token):
global ftp_client, ftp_username, ftp_password
print("Creating FTP developer credentials...")
create_endpoint = f"ravop/developer/add/?token={token}"
res = make_request(create_endpoint, "get")
res = res.json()
username = res['username']
password = res['password']
time.sleep(2)
if FTP_SERVER_URL != "localhost" and FTP_SERVER_URL != "0.0.0.0":
wifi = speedtest.Speedtest()
upload_speed = int(wifi.upload())
upload_speed = upload_speed / 8
if upload_speed <= 3000000:
upload_multiplier = 1
elif upload_speed < 80000000:
upload_multiplier = int((upload_speed/80000000) * 1000)
else:
upload_multiplier = 1000
g.ftp_upload_blocksize = 8192 * upload_multiplier
else:
g.ftp_upload_blocksize = 8192 * 1000
print("FTP Upload Blocksize: ", g.ftp_upload_blocksize)
ftp_client = get_client(username=username, password=password)
ftp_username = username
ftp_password = password
def t(value, dtype="ndarray", **kwargs):
"""
To create scalars, tensors and other data types
"""
if dtype == "ndarray":
if isinstance(value, int):
return Scalar(value, **kwargs)
elif isinstance(value, float):
return Scalar(value, **kwargs)
else:
return Tensor(value, **kwargs)
elif dtype == "file":
return File(value=value, dtype=dtype, **kwargs)
def create_op(operator=None, *args, **params):
return __create_math_op(operator=operator, *args, **params)
def epsilon():
return Scalar(1e-07)
def one():
return Scalar(1)
def minus_one():
return Scalar(-1)
def inf():
return Scalar(np.inf)
def pi():
return Scalar(np.pi)
def __create_math_op(*args, **kwargs):
params = dict()
for key, value in kwargs.items():
if key in ["node_type", "op_type", "status", "name", "operator"]:
continue
if (
isinstance(value, Op)
or isinstance(value, Data)
or isinstance(value, Scalar)
or isinstance(value, Tensor)
):
params[key] = value.id
elif type(value).__name__ in ["int", "float"]:
params[key] = Scalar(value).id
elif isinstance(value, list) or isinstance(value, tuple):
params[key] = Tensor(value).id
elif type(value).__name__ == "str":
params[key] = value
elif isinstance(value, bool):
params[key] = value
if len(args) == 0:
op_ids = None
op_type = None
node_type = None
else:
op_ids = []
for op in args:
op_ids.append(op.id)
if len(op_ids) == 1:
op_type = OpTypes.UNARY
elif len(op_ids) == 2:
op_type = OpTypes.BINARY
else:
op_type = None
node_type = NodeTypes.MIDDLE
if op_ids is not None:
op_ids = json.dumps(op_ids)
node_type = kwargs.get("node_type", node_type)
op_type = kwargs.get("op_type", op_type)
status = kwargs.get("status", OpStatus.PENDING)
operator = kwargs.get("operator", None)
complexity = kwargs.get("complexity", None)
op = make_request("op/create/", "post", {
"name": kwargs.get("name", None),
"graph_id": g.graph_id,
"subgraph_id": g.sub_graph_id,
"node_type": node_type,
"inputs": op_ids,
"outputs": None,
"op_type": op_type,
"operator": operator,
"status": status,
"complexity": complexity,
"params": json.dumps(params),
})
op = op.json()
op = Op(id=op["id"])
if g.eager_mode:
op.wait_till_computed()
return op
class ParentClass(object):
def __init__(self, id=None, **kwargs):
self._error = None
self._status_code = None
if id is not None:
self.__get(endpoint=self.get_endpoint)
else:
self.__create(self.create_endpoint, **kwargs)
def fetch_update(self):
self.__get(self.get_endpoint)
def __get(self, endpoint):
# print('ENDPOINT: ',endpoint)
res = make_request(endpoint, "get")
# print("Response:GET:", res.json())
status_code = res.status_code
res = res.json()
if status_code == 200:
for k, v in res.items():
self.__dict__[k] = v
self._status_code = 200
else:
self._error = res['message']
self._status_code = status_code
def __create(self, endpoint, **kwargs):
res = make_request(endpoint, "post", payload={**kwargs})
# print("Response:POST:", res.text, kwargs)
status_code = res.status_code
res = res.json()
if status_code == 200:
# Set class attributes
for k, v in res.items():
self.__dict__[k] = v
self._status_code = 200
else:
self._error = res['message']
self._status_code = status_code
@property
def error(self):
if hasattr(self, "_error"):
return self._error
return None
@property
def status_code(self):
if hasattr(self, "_status_code"):
return self._status_code
return None
def valid(self):
if self.status_code == 200:
return True
else:
return False
class Op(ParentClass):
def __init__(self, id=None, **kwargs):
self.get_endpoint = f"op/get/?id={id}"
self.create_endpoint = f"op/create/"
if id is not None:
super().__init__(id=id)
else:
inputs = kwargs.get("inputs", None)
outputs = kwargs.get("outputs", None)
operator = kwargs.get("operator", None)
if (inputs is not None or outputs is not None) and operator is not None:
info = self.extract_info(**kwargs)
info['graph_id'] = g.graph_id
info['subgraph_id'] = g.sub_graph_id
info['params'] = json.dumps(kwargs)
info["name"] = kwargs.get("name", None)
super().__init__(id, **info)
def wait_till_computed(self):
print('Waiting for Op id: ', self.id)
while self.get_status() != 'computed':
if self.fetch_retries() == "failed":
end_endpoint = f"graph/end/?id={g.graph_id}"
res = make_request(end_endpoint, "get")
print("\n------------------------------")
print(res.json()['message'])
self.fetch_update()
print("Error: ",self.message)
sys.exit()
time.sleep(0.1)
sys.stdout.write("\033[F") # back to previous line
sys.stdout.write("\033[K") # clear line
def fetch_retries(self):
res = make_request(f"graph/get/?id={g.graph_id}", "get").json()['status']
return res
def get_status(self):
return make_request(f"op/status/?id={self.id}", "get").json()['op_status']
def extract_info(self, **kwargs):
inputs = kwargs.get("inputs", None)
outputs = kwargs.get("outputs", None)
operator = kwargs.get("operator", None)
# Figure out node type
if inputs is None and outputs is not None:
node_type = NodeTypes.INPUT
elif inputs is not None and outputs is None:
node_type = NodeTypes.MIDDLE
else:
raise Exception("Invalid node type")
if inputs is not None:
if len(inputs) == 1:
op_type = OpTypes.UNARY
elif len(inputs) == 2:
op_type = OpTypes.BINARY
else:
raise Exception("Invalid number of inputs")
else:
op_type = OpTypes.OTHER
if outputs is None:
status = OpStatus.PENDING
else:
status = OpStatus.COMPUTED
inputs = json.dumps(inputs)
outputs = json.dumps(outputs)
return {
"node_type": node_type,
"op_type": op_type,
"status": status,
"inputs": inputs,
"outputs": outputs,
"operator": operator
}
def get_output(self):
return self.get_data().get_value()
def get_dtype(self):
return self.get_data().get_dtype()
def get_shape(self):
return self.get_data().get_shape()
def get_data(self):
if self.outputs is None or self.outputs == "null":
return None
data_id = json.loads(self.outputs)[0]
data = Data(id=data_id)
return data
def __str__(self):
return (
"Op:\nId:{}\nName:{}\nType:{}\nOperator:{}\n\nStatus:{}\n".format(
self.id,
self.name,
self.op_type,
self.operator,
self.status,
)
)
def __call__(self, *args, **kwargs):
self.wait_till_computed()
self.fetch_update()
temp = make_request(f"global/subgraph/update/id/?graph_id={g.graph_id}", "get").json()['global_subgraph_id']
g.sub_graph_id = temp + 1
return self.get_output()
def __add__(self, other):
return add(self, other)
def __sub__(self, other):
return sub(self, other)
def __mul__(self, other):
return mul(self, other)
def __pos__(self):
return pos(self)
def __neg__(self):
return neg(self)
def __abs__(self):
return abs(self)
def __invert__(self):
return inv(self)
def __lt__(self, other):
return less(self, other)
def __le__(self, other):
return less_equal(self, other)
def __eq__(self, other):
return equal(self, other)
def __ne__(self, other):
return not_equal(self, other)
def __ge__(self, other):
return greater_equal(self, other)
def __getitem__(self, item):
if type(item).__name__ == "slice":
return self.slice(begin=item.start, size=item.stop - item.start)
elif type(item).__name__ == "int":
return self.gather(Scalar(item))
elif type(item).__name__ == "tuple":
var = self
for i in item:
var = var.gather(Scalar(i))
return var
def add_method(cls):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
return func(self, *args, **kwargs)
setattr(cls, func.__name__, wrapper)
# Note we are not binding func, but wrapper which accepts self but does exactly the same as func
return func # returning func means func can still be used normally
return decorator
for key, value in functions.items():
exec(
"""@add_method(Op)\ndef {}(*args, **kwargs):\n return __create_math_op(*args, operator="{}", **kwargs)""".format(
key, key
)
)
class Scalar(Op):
def __init__(self, value=None, id=None, data=None, **kwargs):
if id is not None:
# Get
super().__init__(id=id)
elif data is not None:
if data.valid():
# Create scalar
super().__init__(
operator="lin", inputs=None, outputs=[data.id], **kwargs
)
else:
self.__dict__['_status_code'] = 400
self.__dict__['_error'] = "Invalid data"
elif value is not None:
# Create data and then op
data = Data(value=value)
if data.valid():
super().__init__(
operator="lin", inputs=None, outputs=[data.id], **kwargs
)
else:
self.__dict__['_status_code'] = 400
self.__dict__['_error'] = "Invalid data"
def __str__(self):
return "Scalar Op:\nId:{}\nOutput:{}\nStatus:{}\nDtype:{}\n".format(
self.id, self.get_output(), self.status, self.get_dtype
)
def __float__(self):
return float(self.get_output())
class Tensor(Op):
"""
It supports:
1. list
2. ndarray
3. string(list)
"""
def __init__(self, value=None, id=None, data=None, **kwargs):
if id is not None:
# Get
super().__init__(id=id)
elif data is not None:
if data.valid():
# Create scalar
super().__init__(
operator="lin", inputs=None, outputs=[data.id], **kwargs
)
else:
self.__dict__['_status_code'] = 400
self.__dict__['_error'] = "Invalid data"
elif value is not None:
# Create data and then op
data = Data(value=value)
if data.valid():
super().__init__(
operator="lin", inputs=None, outputs=[data.id], **kwargs
)
else:
self.__dict__['_status_code'] = 400
self.__dict__['_error'] = "Invalid data"
def __str__(self):
return "Tensor Op:\nId:{}\nOutput:{}\nStatus:{}\nDtype:{}".format(
self.id, self.get_output(), self.status, self.get_dtype
)
class File(Op):
def __init__(self, value, **kwargs):
data = Data(value=value, dtype="file")
super().__init__(
operator="lin", inputs=None, outputs=[data.id], **kwargs
)
@property
def dtype(self):
return "file"
@property
def shape(self):
return None
def __str__(self):
return "File Op:\nId:{}\nOutput:{}\nStatus:{}\nDtype:{}\n".format(
self.id, self.get_output(), self.status, self.dtype
)
def __call__(self, *args, **kwargs):
return self.get_output()
class Data(ParentClass):
def __init__(self, value=None, id=None, **kwargs):
global ftp_client
self.get_endpoint = f"data/get/?id={id}"
self.create_endpoint = f"data/create/"
# value = kwargs.get("value", None)
# if value is not None and isinstance(value, np.ndarray):
# value = value.tolist()
# kwargs['value'] = value
# else:
# kwargs['value'] = value
if id is None:
if value is not None:
value = convert_to_ndarray(value)
byte_size = value.size * value.itemsize
if byte_size > 0 * 1000000:
dtype = value.dtype
kwargs['dtype'] = str(dtype)
kwargs['username'] = ftp_username
# if kwargs.get("value", None) is not None:
# kwargs['value'] = "uploaded in FTP"
else:
dtype = value.dtype
kwargs['dtype'] = str(dtype)
kwargs['value'] = value.tolist()
kwargs['username'] = ftp_username
super().__init__(id, **kwargs)
# print("Username and password: ", ftp_username, ftp_password)
# print("Check ftp creds: ",check_credentials(ftp_username,ftp_password))
if id is None:
if value is not None and byte_size > 0 * 1000000:
#value = convert_to_ndarray(value)
file_path = dump_data(self.id, value)
ftp_client.upload(file_path, os.path.basename(file_path))
# print("\nFile uploaded!", file_path)
os.remove(file_path)
def __call__(self, *args, **kwargs):
self.fetch_update()
return self.get_value()
def get_value(self):
if hasattr(self, 'value'):
return convert_to_ndarray(self.value)
else:
return None
def get_dtype(self):
if hasattr(self, "dtype"):
return self.dtype
else:
return None
def get_shape(self):
if hasattr(self, 'value'):
if self.value is not None:
return self.value.shape
return None
class Graph(ParentClass):
"""A class to represent a graph object"""
def __init__(self, id=None, **kwargs):
self.get_graph_id_endpoint = f"graph/get/graph_id"
res = make_request(self.get_graph_id_endpoint, "get")
g.graph_id = res.json()["graph_id"]
if id is None:
id = g.graph_id + 1
self.my_id = id - 1
g.sub_graph_id = 1
else:
self.my_id = id
self.get_endpoint = f"graph/get/?id={id}"
self.create_endpoint = f"graph/create/"
if id is not None and id <= g.graph_id:
super().__init__(id=id)
else:
super().__init__(**kwargs)
# def add(self, op):
# """Add an op to the graph"""
# op.add_to_graph(self.id)
@property
def progress(self):
get_progress_endpoint = f"graph/op/get/progress/?id={self.my_id}"
res = make_request(get_progress_endpoint, "get")
return res.json()['progress']
def end(self):
"""End the graph"""
end_endpoint = f"graph/end/?id={self.my_id}"
res = make_request(end_endpoint, "get")
print('\n')
print(res.json()['message'])
return res.json()['message']
def get_op_stats(self):
"""Get stats of all ops"""
get_op_stats_endpoint = f"graph/op/get/stats/?id={self.my_id}"
res = make_request(get_op_stats_endpoint, "get")
return res.json()
# def clean(self):
# ravdb.delete_graph_ops(self._graph_db.id)
@property
def ops(self):
"""Get all ops associated with a graph"""
get_graph_ops_endpoint = f"graph/op/get/?id={self.my_id}"
res = make_request(get_graph_ops_endpoint, "get")
res = res.json()
return res
def get_ops_by_name(self, op_name, graph_id=None):
get_ops_by_name_endpoint = f"graph/op/name/get/?op_name={op_name}&id={graph_id}"
res = make_request(get_ops_by_name_endpoint, "get")
res = res.json()
return res
def __str__(self):
return "Graph:\nId:{}\nStatus:{}\n".format(self.id, self.status)
| 29.200622 | 124 | 0.551555 |
794678a4bf3e65fe14a5544b156af7162396e46a | 21,394 | py | Python | front_panel_settings.py | labscript-suite-temp-2-archive/lkohfahl-blacs--forked-from--labscript_suite-blacs | 6deae2cff7baec77d01f91bc07123c26c7e01c5c | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | front_panel_settings.py | labscript-suite-temp-2-archive/lkohfahl-blacs--forked-from--labscript_suite-blacs | 6deae2cff7baec77d01f91bc07123c26c7e01c5c | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | front_panel_settings.py | labscript-suite-temp-2-archive/lkohfahl-blacs--forked-from--labscript_suite-blacs | 6deae2cff7baec77d01f91bc07123c26c7e01c5c | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | #####################################################################
# #
# /front_panel_settings.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program BLACS, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
from labscript_utils.numpy_dtype_workaround import dtype_workaround
if PY2:
str = unicode
import os
import logging
from qtutils.qt.QtCore import *
from qtutils.qt.QtGui import *
from qtutils.qt.QtWidgets import *
import labscript_utils.excepthook
import numpy
import labscript_utils.h5_lock, h5py
from qtutils import *
from labscript_utils.connections import ConnectionTable
logger = logging.getLogger('BLACS.FrontPanelSettings')
def _ensure_str(s):
"""convert bytestrings and numpy strings to python strings"""
return s.decode() if isinstance(s, bytes) else str(s)
class FrontPanelSettings(object):
def __init__(self,settings_path,connection_table):
self.settings_path = settings_path
self.connection_table = connection_table
with h5py.File(settings_path,'a') as h5file:
pass
def setup(self,blacs):
self.tablist = blacs.tablist
self.attached_devices = blacs.attached_devices
self.notebook = blacs.tab_widgets
self.window = blacs.ui
self.panes = blacs.panes
self.blacs = blacs
def restore(self):
# Get list of DO/AO
# Does the object have a name?
# yes: Then, find the device in the BLACS connection table that matches that name
# Also Find the device in the saved connection table.
# Do the connection table entries match?
# yes: Restore as is
# no: Is it JUST the parent device and "connected to" that has changed?
# yes: Restore to new device
# no: Show error that this device could not be restored
# no: Ok, so it isn't in the saved connection table
# Does this device/channel exist in the BLACS connection table?
# yes: Don't restore, show error that this chanel is now in use by a new device
# Give option to restore anyway...
# no: Restore as is
#
# Display errors, give option to cancel starting of BLACS so that the connection table can be edited
# Create saved connection table
settings = {}
question = {}
error = {}
tab_data = {'BLACS settings':{}}
try:
saved_ct = ConnectionTable(self.settings_path, logging_prefix='BLACS', exceptions_in_thread=True)
ct_match,error = self.connection_table.compare_to(saved_ct)
with h5py.File(self.settings_path,'r') as hdf5_file:
# Get Tab Data
dataset = hdf5_file['/front_panel'].get('_notebook_data',[])
for row in dataset:
tab_name = _ensure_str(row['tab_name'])
tab_data.setdefault(tab_name,{})
try:
tab_data[tab_name] = {'notebook':row['notebook'], 'page':row['page'], 'visible':row['visible'], 'data':eval(_ensure_str(row['data']))}
except:
logger.info("Could not load tab data for %s"%tab_name)
#now get dataset attributes
tab_data['BLACS settings'] = dict(dataset.attrs)
# Get the front panel values
if 'front_panel' in hdf5_file["/front_panel"]:
dataset = hdf5_file["/front_panel"].get('front_panel', [])
for row in dataset:
result = self.check_row(row,ct_match,self.connection_table,saved_ct)
columns = ['name', 'device_name', 'channel', 'base_value', 'locked', 'base_step_size', 'current_units']
data_dict = {}
for i in range(len(row)):
if isinstance(row[i], bytes) or isinstance(row[i], str):
data_dict[columns[i]] = _ensure_str(row[i])
else:
data_dict[columns[i]] = row[i]
settings,question,error = self.handle_return_code(data_dict,result,settings,question,error)
# Else Legacy restore from GTK save data!
else:
# open Datasets
type_list = ["AO", "DO", "DDS"]
for key in type_list:
dataset = hdf5_file["/front_panel"].get(key, [])
for row in dataset:
result = self.check_row(row,ct_match,self.connection_table,saved_ct)
columns = ['name', 'device_name', 'channel', 'base_value', 'locked', 'base_step_size', 'current_units']
data_dict = {}
for i in range(len(row)):
data_dict[columns[i]] = row[i]
settings,question,error = self.handle_return_code(data_dict,result,settings,question,error)
except Exception as e:
logger.info("Could not load saved settings")
logger.info(str(e))
return settings,question,error,tab_data
def handle_return_code(self,row,result,settings,question,error):
# 1: Restore to existing device
# 2: Send to new device
# 3: Device now exists, use saved values from unnamed device?
# Note that if 2 has happened, 3 will also happen
# This is because we have the original channel, and the moved channel in the same place
#-1: Device no longer in the connection table, throw error
#-2: Device parameters not compatible, throw error
if type(result) == tuple:
connection = result[1]
result = result[0]
if result == 1:
settings.setdefault(row['device_name'],{})
settings[row['device_name']][row['channel']] = row
elif result == 2:
settings.setdefault(connection.parent.name,{})
settings[connection.parent.name][connection.parent_port] = row
elif result == 3:
question.setdefault(connection.parent.name,{})
question[connection.parent.name][connection.parent_port] = row
elif result == -1:
error[row['device_name']+'_'+row['channel']] = row,"missing"
elif result == -2:
error[row['device_name']+'_'+row['channel']] = row,"changed"
return settings,question,error
def check_row(self,row,ct_match,blacs_ct,saved_ct):
# If it has a name
if row[0] != "-":
if ct_match:
# Restore
return 1
else:
# Find if this device is in the connection table
connection = blacs_ct.find_by_name(row[0])
connection2 = saved_ct.find_by_name(row[0])
if connection:
# compare the two connections, see what differs
# if compare fails only on parent, connected to:
# send to new parent
# else:
# show error, device parameters not compatible with saved data
result,error = connection.compare_to(connection2)
allowed_length = 0
if "parent_port" in error:
allowed_length += 1
if len(error) > allowed_length:
return -2 # failure, device parameters not compatible
elif error == {} and connection.parent.name == connection2.parent.name:
return 1 # All details about this device match
else:
return 2,connection # moved to new device
else:
# no longer in connection table, throw error
return -1
else:
# It doesn't have a name
# Does the channel exist for this device in the connection table
connection = blacs_ct.find_child(row[1],row[2])
if connection:
# throw error, device now exists, should we restore?
return 3,connection
else:
# restore to device
return 1
@inmain_decorator(wait_for_return=True)
def get_save_data(self):
tab_data = {}
notebook_data = {}
window_data = {}
plugin_data = {}
# iterate over all tabs
for device_name,tab in self.tablist.items():
tab_data[device_name] = {'front_panel':tab.settings['front_panel_settings'], 'save_data': tab.get_all_save_data()}
# Find the notebook the tab is in
#
# By default we assume it is in notebook0, on page 0. This way, if a tab gets lost somewhere,
# and isn't found to be a child of any notebook we know about,
# it will revert back to notebook 1 when the file is loaded upon program restart!
current_notebook_name = 0
page = 0
visible = False
for notebook_name,notebook in self.notebook.items():
if notebook.indexOf(tab._ui) != -1:
current_notebook_name = notebook_name
page = notebook.indexOf(tab._ui)
visible = True if notebook.currentIndex() == page else False
break
notebook_data[device_name] = {"notebook":current_notebook_name,"page":page, "visible":visible}
# iterate over all plugins
for module_name, plugin in self.blacs.plugins.items():
try:
plugin_data[module_name] = plugin.get_save_data()
except Exception as e:
logger.error('Could not save data for plugin %s. Error was: %s'%(module_name,str(e)))
# save window data
# Size of window
window_data["_main_window"] = {"width":self.window.normalGeometry().width(),
"height":self.window.normalGeometry().height(),
"xpos":self.window.normalGeometry().x(),
"ypos":self.window.normalGeometry().y(),
"maximized":self.window.isMaximized(),
"frame_height":abs(self.window.frameGeometry().height()-self.window.normalGeometry().height()),
"frame_width":abs(self.window.frameGeometry().width()-self.window.normalGeometry().width()),
"_analysis":self.blacs.analysis_submission.get_save_data(),
"_queue":self.blacs.queue.get_save_data(),
}
# Pane positions
for name,pane in self.panes.items():
window_data[name] = pane.sizes()
return tab_data,notebook_data,window_data,plugin_data
@inmain_decorator(wait_for_return=True)
def save_front_panel_to_h5(self,current_file,states,tab_positions,window_data,plugin_data,silent = {}, force_new_conn_table = False):
# Save the front panel!
# Does the file exist?
# Yes: Check connection table inside matches current connection table. Does it match?
# Yes: Does the file have a front panel already saved in it?
# Yes: Can we overwrite?
# Yes: Delete front_panel group, save new front panel
# No: Create error dialog!
# No: Save front panel in here
#
# No: Return
# No: Create new file, place inside the connection table and front panel
if os.path.isfile(current_file):
save_conn_table = True if force_new_conn_table else False
result = False
if not save_conn_table:
try:
new_conn = ConnectionTable(current_file)
result,error = self.connection_table.compare_to(new_conn)
except:
# no connection table is present, so also save the connection table!
save_conn_table = True
# if save_conn_table is True, we don't bother checking to see if the connection tables match, because save_conn_table is only true when the connection table doesn't exist in the current file
# As a result, if save_conn_table is True, we ignore connection table checking, and save the connection table in the h5file.
if save_conn_table or result:
with h5py.File(current_file,'r+') as hdf5_file:
if hdf5_file['/'].get('front_panel') != None:
# Create a dialog to ask whether we can overwrite!
overwrite = False
if not silent:
message = QMessageBox()
message.setText("This file '%s' already contains a connection table."%current_file)
message.setInformativeText("Do you wish to replace the existing front panel configuration in this file?")
message.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
message.setDefaultButton(QMessageBox.No)
message.setIcon(QMessageBox.Question)
message.setWindowTitle("BLACS")
resp = message.exec_()
if resp == QMessageBox.Yes :
overwrite = True
else:
overwrite = silent["overwrite"]
if overwrite:
# Delete Front panel group, save new front panel
del hdf5_file['/front_panel']
self.store_front_panel_in_h5(hdf5_file,states,tab_positions,window_data,plugin_data,save_conn_table)
else:
if not silent:
message = QMessageBox()
message.setText("Front Panel not saved.")
message.setIcon(QMessageBox.Information)
message.setWindowTitle("BLACS")
message.exec_()
else:
logger.info("Front Panel not saved as it already existed in the h5 file '"+current_file+"'")
return
else:
# Save Front Panel in here
self.store_front_panel_in_h5(hdf5_file,states,tab_positions,window_data,plugin_data,save_conn_table)
else:
# Create Error dialog (invalid connection table)
if not silent:
message = QMessageBox()
message.setText("The Front Panel was not saved as the file selected contains a connection table which is not a subset of the BLACS connection table.")
message.setIcon(QMessageBox.Information)
message.setWindowTitle("BLACS")
message.exec_()
else:
logger.info("Front Panel not saved as the connection table in the h5 file '"+current_file+"' didn't match the current connection table.")
return
else:
with h5py.File(current_file,'w') as hdf5_file:
# save connection table, save front panel
self.store_front_panel_in_h5(hdf5_file,states,tab_positions,window_data,plugin_data,save_conn_table=True)
@inmain_decorator(wait_for_return=True)
def store_front_panel_in_h5(self, hdf5_file,tab_data,notebook_data,window_data,plugin_data,save_conn_table=False,save_queue_data=True):
if save_conn_table:
if 'connection table' in hdf5_file:
del hdf5_file['connection table']
hdf5_file.create_dataset('connection table', data=self.connection_table.raw_table)
data_group = hdf5_file['/'].create_group('front_panel')
front_panel_list = []
other_data_list = []
front_panel_dtype = dtype_workaround([('name','a256'),('device_name','a256'),('channel','a256'),('base_value',float),('locked',bool),('base_step_size',float),('current_units','a256')])
max_od_length = 2 # empty dictionary
# Iterate over each device within a class
for device_name, device_state in tab_data.items():
logger.debug("saving front panel for device:" + device_name)
# Insert front panel data into dataset
for hardware_name, data in device_state["front_panel"].items():
if data != {}:
front_panel_list.append((data['name'],
device_name,
hardware_name,
data['base_value'],
data['locked'],
data['base_step_size'] if 'base_step_size' in data else 0,
data['current_units'] if 'current_units' in data else ''
)
)
# Save "other data"
od = repr(device_state["save_data"])
other_data_list.append(od)
max_od_length = len(od) if len(od) > max_od_length else max_od_length
# Create datasets
if front_panel_list:
front_panel_array = numpy.empty(len(front_panel_list),dtype=front_panel_dtype)
for i, row in enumerate(front_panel_list):
front_panel_array[i] = row
data_group.create_dataset('front_panel',data=front_panel_array)
# Save tab data
i = 0
tab_data = numpy.empty(len(notebook_data),dtype=dtype_workaround([('tab_name','a256'),('notebook','a2'),('page',int),('visible',bool),('data','a'+str(max_od_length))]))
for device_name,data in notebook_data.items():
tab_data[i] = (device_name,data["notebook"],data["page"],data["visible"],other_data_list[i])
i += 1
# Save BLACS Main GUI Info
dataset = data_group.create_dataset("_notebook_data",data=tab_data)
dataset.attrs["window_width"] = window_data["_main_window"]["width"]
dataset.attrs["window_height"] = window_data["_main_window"]["height"]
dataset.attrs["window_xpos"] = window_data["_main_window"]["xpos"]
dataset.attrs["window_ypos"] = window_data["_main_window"]["ypos"]
dataset.attrs["window_maximized"] = window_data["_main_window"]["maximized"]
dataset.attrs["window_frame_height"] = window_data["_main_window"]["frame_height"]
dataset.attrs["window_frame_width"] = window_data["_main_window"]["frame_width"]
dataset.attrs['plugin_data'] = repr(plugin_data)
dataset.attrs['analysis_data'] = repr(window_data["_main_window"]["_analysis"])
if save_queue_data:
dataset.attrs['queue_data'] = repr(window_data["_main_window"]["_queue"])
for pane_name,pane_position in window_data.items():
if pane_name != "_main_window":
dataset.attrs[pane_name] = pane_position
# Save analysis server settings:
#dataset = data_group.create_group("analysis_server")
#dataset.attrs['send_for_analysis'] = self.blacs.analysis_submission.toggle_analysis.get_active()
#dataset.attrs['server'] = self.blacs.analysis_submission.analysis_host.get_text()
| 52.694581 | 202 | 0.531691 |
79467bac96460fc060858f3f984b01bcdfde6f32 | 629 | py | Python | comparador_hashes/ch.py | mauriciopicirillo/Seguran-a_da_informa-o_com_Python | 0a439066dda42268de7338c815c4cf7872079211 | [
"MIT"
] | null | null | null | comparador_hashes/ch.py | mauriciopicirillo/Seguran-a_da_informa-o_com_Python | 0a439066dda42268de7338c815c4cf7872079211 | [
"MIT"
] | null | null | null | comparador_hashes/ch.py | mauriciopicirillo/Seguran-a_da_informa-o_com_Python | 0a439066dda42268de7338c815c4cf7872079211 | [
"MIT"
] | null | null | null | import hashlib
# Files to compare
arquivo1 = 'a.txt'
arquivo2 = 'b.txt'

# Compute the RIPEMD-160 digest of each file
hash1 = hashlib.new('ripemd160')
hash1.update(open(arquivo1, 'rb').read())

hash2 = hashlib.new('ripemd160')
hash2.update(open(arquivo2, 'rb').read())

# Compare the digests to decide whether the two files have identical contents
if hash1.digest() != hash2.digest():
    print(f'The file {arquivo1} is different from the file {arquivo2}')
    print('The hash of a.txt is: ', hash1.hexdigest())
    print('The hash of b.txt is: ', hash2.hexdigest())
else:
    print(f'The file {arquivo1} is equal to the file {arquivo2}')
    print('The hash of a.txt is: ', hash1.hexdigest())
    print('The hash of b.txt is: ', hash2.hexdigest()) | 29.952381 | 70 | 0.662957 |
79467bb38f0bffc5757e730d320942d1b6f2b095 | 938 | py | Python | data/_unify.py | efsantrifuge/kr_tr_word_quizes | a49962a49a2e04d0e199a752ef3ac971420fb5ad | [
"MIT"
] | null | null | null | data/_unify.py | efsantrifuge/kr_tr_word_quizes | a49962a49a2e04d0e199a752ef3ac971420fb5ad | [
"MIT"
] | null | null | null | data/_unify.py | efsantrifuge/kr_tr_word_quizes | a49962a49a2e04d0e199a752ef3ac971420fb5ad | [
"MIT"
] | null | null | null | '''
simple job script that concatenates the numbered text files 1.txt .. 7.txt into output.txt
the files to process are not taken from user input; edit the constants in the code to change them
the single command-line argument selects raw mode (1: no section numbers, double newlines collapsed) or numbered mode (0)
example command:
python _unify.py 1
python _unify.py 0
'''
import sys
def unify():
filename_prefix = ''
filename_postfix = '.txt'
count_begin = 1
count_end = 7
if sys.argv[1] == '1':
raw = True
else:
raw = False
data = ''
for i in range(count_begin, count_end + 1):
if raw is not True:
data += str(i) + '\n'
filename = filename_prefix + str(i) + filename_postfix
with open(filename, 'r', encoding='utf-8', errors='ignore') as file:
for line in file:
data += line
data += '\n\n'
if raw and data.find('\n\n') != -1:
data = data.replace('\n\n', '\n')
with open('output.txt', 'w', encoding='utf-8', errors='ignore') as ofile:
ofile.write(data)
if __name__ == '__main__':
unify()
| 19.957447 | 77 | 0.554371 |
79467c9a3c28ee88dd4039a803c6941b730ab341 | 1,245 | py | Python | Python/max-points-on-a-line.py | ShuaiWangGit/LeetCode | d85a4cb23f8f85059691994e7ad89001c6e4f3f6 | [
"MIT"
] | 5 | 2018-09-06T03:12:33.000Z | 2022-03-03T18:57:11.000Z | Python/max-points-on-a-line.py | pnandini/LeetCode | e746c3298be96dec8e160da9378940568ef631b1 | [
"MIT"
] | 1 | 2018-07-10T03:28:43.000Z | 2018-07-10T03:28:43.000Z | Python/max-points-on-a-line.py | pnandini/LeetCode | e746c3298be96dec8e160da9378940568ef631b1 | [
"MIT"
] | 5 | 2018-09-06T03:12:35.000Z | 2021-07-03T09:00:56.000Z | # Time: O(n^2)
# Space: O(n)
#
# Given n points on a 2D plane, find the maximum number of points that lie on the same straight line.
#
import collections
# Definition for a point
class Point:
def __init__(self, a=0, b=0):
self.x = a
self.y = b
class Solution(object):
def maxPoints(self, points):
"""
:type points: List[Point]
:rtype: int
"""
max_points = 0
for i, start in enumerate(points):
slope_count, same = collections.defaultdict(int), 1
for j in xrange(i + 1, len(points)):
end = points[j]
if start.x == end.x and start.y == end.y:
same += 1
else:
slope = float("inf")
if start.x - end.x != 0:
slope = (start.y - end.y) * 1.0 / (start.x - end.x)
slope_count[slope] += 1
current_max = same
for slope in slope_count:
current_max = max(current_max, slope_count[slope] + same)
max_points = max(max_points, current_max)
return max_points
if __name__ == "__main__":
print Solution().maxPoints([Point(), Point(), Point()])
| 27.666667 | 101 | 0.51245 |
79467de14e824fdd929e1d60e7b09d37d9c5c583 | 3,561 | py | Python | mlp-mmdgm/generate_data_mnist.py | jjzhunet9/mmdgm | 9422bdc88b913c09506944d3529f6f955b95687c | [
"MIT"
] | 22 | 2015-11-12T09:49:05.000Z | 2020-04-06T10:44:04.000Z | mlp-mmdgm/generate_data_mnist.py | jjzhunet9/mmdgm | 9422bdc88b913c09506944d3529f6f955b95687c | [
"MIT"
] | null | null | null | mlp-mmdgm/generate_data_mnist.py | jjzhunet9/mmdgm | 9422bdc88b913c09506944d3529f6f955b95687c | [
"MIT"
] | 11 | 2015-10-07T08:07:01.000Z | 2020-07-16T03:12:15.000Z | import numpy as np
import scipy.io as sio
import cPickle, gzip
import math
import os, sys
from anglepy import paramgraphics
# load data
print 'Loading data...'
f = gzip.open('./data/mnist/mnist_28.pkl.gz', 'rb')
(x_train, t_train), (x_valid, t_valid), (x_test, t_test) = cPickle.load(f)
f.close()
# choose number of images to transform
num_trans = 10000
data = (x_test[:num_trans,:]).T
pertub_label = np.ones(data.shape)
# perturb data
print 'Perturbing data...'
width = 28
height = 28
pertub_type = int(sys.argv[1])
pertub_prob = float(sys.argv[2])
noise_type = 2 # 1: fill the occluded region with zeros; otherwise: fill it with uniform random noise
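# Illustrative invocations, inferred from how sys.argv is used below (these
# example commands are not part of the original script's documentation):
#   python generate_data_mnist.py 3 12      # occlude a centred 12x12 block
#   python generate_data_mnist.py 4 0.8     # replace ~80% of pixels with noise
#   python generate_data_mnist.py 5 10 20   # blank out image columns 10..19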
if pertub_type == 1:
data_perturbed = data + np.random.normal(0,0.4,(data.shape))
elif pertub_type == 2:
data_perturbed = data.copy()
data_perturbed *= (np.random.random(data.shape) > pertub_prob)
elif pertub_type == 3:
data_perturbed = data.copy()
pertub_prob = int(pertub_prob)
rec_h = pertub_prob
rec_w = rec_h
begin_h = (width - rec_w)/ 2
begin_w = (width - rec_w)/ 2
print rec_h, rec_w, begin_h, begin_w
rectengle = np.zeros(rec_h*rec_w)
for i in xrange(rec_h):
rectengle[i*rec_w:(i+1)*rec_w]=np.arange((begin_h+i)*width+begin_w,(begin_h+i)*width+begin_w+rec_w)
if noise_type == 1:
data_perturbed[rectengle.astype(np.int32),:] = 0
else:
data_perturbed[rectengle.astype(np.int32),:] = np.random.random((rectengle.shape[0],data.shape[1]))
pertub_label[rectengle.astype(np.int32),:] = 0
elif pertub_type == 4:
data_perturbed = np.random.random(data.shape)
sample = np.random.random(data.shape)
pertub_label[sample < pertub_prob] = 0
data_perturbed = pertub_label*data+(1-pertub_label)*data_perturbed
elif pertub_type == 5:
pertub_prob1 = float(sys.argv[3])
start = int(pertub_prob)
end = int(pertub_prob1)
data_perturbed = np.zeros(data.shape)
tmp_a = np.ones(width)
tmp_a[start:end] = 0
#print tmp_a.shape
#print tmp_a
tmp_b = np.tile(tmp_a, height)
print tmp_b.shape
print pertub_label.shape
pertub_label = (pertub_label.T*tmp_b).T
data_perturbed = pertub_label*data+(1-pertub_label)*data_perturbed
if pertub_type == 4:
sio.savemat('data_imputation/type_'+str(pertub_type)+'_params_'+str(int(pertub_prob*100))+'_noise_rawdata.mat', {'z_train' : x_train.T, 'z_test_original' : data, 'z_test' : data_perturbed, 'pertub_label' : pertub_label})
#print data_perturbed[:,:25].shape
image = paramgraphics.mat_to_img(data_perturbed[:,:25], (28,28), colorImg=False, scale=True)
image.save('data_imputation/test_noise_4_'+str(pertub_prob)+'.png', 'PNG')
elif pertub_type == 3:
sio.savemat('data_imputation/type_'+str(pertub_type)+'_params_'+str(pertub_prob)+'_noise_rawdata.mat', {'z_train' : x_train.T, 'z_test_original' : data, 'z_test' : data_perturbed, 'pertub_label' : pertub_label})
#print data_perturbed[:,:25].shape
image = paramgraphics.mat_to_img(data_perturbed[:,:25], (28,28), colorImg=False, scale=True)
image.save('data_imputation/test_noise_3_'+str(pertub_prob)+'.png', 'PNG')
elif pertub_type == 5:
sio.savemat('data_imputation/type_'+str(pertub_type)+'_params_'+str(start)+'_'+str(end)+'_noise_rawdata.mat', {'z_train' : x_train.T, 'z_test_original' : data, 'z_test' : data_perturbed, 'pertub_label' : pertub_label})
#print data_perturbed[:,:25].shape
image = paramgraphics.mat_to_img(data_perturbed[:,:25], (28,28), colorImg=False, scale=True)
image.save('data_imputation/test_noise_5_'+str(start)+'_'+str(end)+'.png', 'PNG')
| 39.131868 | 224 | 0.697276 |
79467fd06238a3a544540b4ba6f35eb88aa4b786 | 3,365 | py | Python | openGaussBase/testcase/SECURITY/PERMISSIONS/Opengauss_Function_Security_Separation_Case0010.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SECURITY/PERMISSIONS/Opengauss_Function_Security_Separation_Case0010.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SECURITY/PERMISSIONS/Opengauss_Function_Security_Separation_Case0010.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : security-Separation_rights
Case Name : After separation of duties is enabled, a system administrator can create an index on its own table
Description :
    1.Run as the initial user: CREATE USER sysadmin01 WITH SYSADMIN password '$PASSWORD';
    2.Run as user sysadmin01: CREATE TABLE table03(co1_1 int, col_2 int);
CREATE INDEX index01 ON table03(co1_1);
Expect :
1.CREATE ROLE
CREATE TABLE
2.CREATE INDEX
History :
"""
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
logger = Logger()
class Policy(unittest.TestCase):
def setUp(self):
logger.info(
'----Opengauss_Function_Security_Separation_Case0010 start----')
self.common = Common()
self.sh_primy = CommonSH('PrimaryDbUser')
self.userNode = Node('PrimaryDbUser')
self.DB_ENV_PATH = macro.DB_ENV_PATH
self.Constant = Constant()
def test_policy(self):
logger.info('----------create user --------')
excute_cmd0 = f'source {self.DB_ENV_PATH};' \
f'gs_guc set -D {macro.DB_INSTANCE_PATH} ' \
f'-c "enableSeparationOfDuty=on";' \
f'gs_om -t stop && gs_om -t start'
msg0 = self.userNode.sh(excute_cmd0).result()
logger.info(msg0)
sql_cmd1 = f'CREATE USER sysadmin01 WITH SYSADMIN password' \
f' \'{macro.COMMON_PASSWD}\';'
msg1 = self.sh_primy.execut_db_sql(sql_cmd1)
logger.info(msg1)
sql_cmd2 = 'CREATE TABLE table03(co1_1 int, col_2 int);' \
'CREATE INDEX index01 ON table03(co1_1);'
excute_cmd2 = f'source {self.DB_ENV_PATH};' \
f'gsql -d {self.userNode.db_name} -p ' \
f'{self.userNode.db_port} -U sysadmin01 -W ' \
f'\'{macro.COMMON_PASSWD}\' -c "{sql_cmd2}"'
logger.info(excute_cmd2)
msg2 = self.userNode.sh(excute_cmd2).result()
logger.info(msg2)
self.assertIn(self.Constant.CREATE_INDEX_SUCCESS_MSG, msg2)
def tearDown(self):
        logger.info('-----------Restore configuration and clean up the environment-----------')
excute_cmd0 = f'source {self.DB_ENV_PATH};' \
f'gs_guc set -D {macro.DB_INSTANCE_PATH} ' \
f'-c "enableSeparationOfDuty=off";' \
f'gs_om -t stop && gs_om -t start;'
msg0 = self.userNode.sh(excute_cmd0).result()
logger.info(msg0)
sql_cmd1 = 'DROP TABLE IF EXISTS table03 CASCADE;' \
'DROP USER IF EXISTS sysadmin01 CASCADE;'
msg1 = self.sh_primy.execut_db_sql(sql_cmd1)
logger.info(msg1)
logger.info(
'----Opengauss_Function_Security_Separation_Case0010 finish-----')
| 38.678161 | 84 | 0.628232 |
79468093116729e4b6804b1dd49ef30ef356e8c8 | 362 | py | Python | ezdisteach/model/__init__.py | call-learning/ez-disteach | 7636dfdbfb709769824266800ebba18be764ecc3 | [
"MIT"
] | null | null | null | ezdisteach/model/__init__.py | call-learning/ez-disteach | 7636dfdbfb709769824266800ebba18be764ecc3 | [
"MIT"
] | null | null | null | ezdisteach/model/__init__.py | call-learning/ez-disteach | 7636dfdbfb709769824266800ebba18be764ecc3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
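# Note: the loop below imports every submodule of this package at import time,
# so all model modules are available as soon as the package is loaded.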
for importer, modname, ispkg in pkgutil.walk_packages(path=__path__, prefix=__name__ + '.'):
__import__(modname)
from .base import ImportationError, create_model, AttributeNotInMetaInformation, ModelException, NoSuchModel, \
ReadonlyAttribute, UnsupportedFormat
| 36.2 | 111 | 0.781768 |
794681aadd15b4de1426eccab41d4766de40f459 | 351 | py | Python | backend/todolist/api/routes.py | ElPapi42/flask-todo | 526c5c397068754bfb67aac417558cbb7e640dfb | [
"MIT"
] | 1 | 2021-07-30T11:24:59.000Z | 2021-07-30T11:24:59.000Z | backend/todolist/api/routes.py | ElPapi42/flask-todo | 526c5c397068754bfb67aac417558cbb7e640dfb | [
"MIT"
] | 5 | 2019-12-29T15:31:31.000Z | 2021-05-10T22:25:27.000Z | backend/todolist/api/routes.py | ElPapi42/flask-todo | 526c5c397068754bfb67aac417558cbb7e640dfb | [
"MIT"
] | 8 | 2019-12-22T18:33:50.000Z | 2019-12-28T20:13:21.000Z | from flask_restful import Api
from . import api_bp
from .controllers import UserController, UserList, TaskList, TaskController
api = Api(api_bp)
api.add_resource(UserList, "/users/")
api.add_resource(UserController, "/users/<u_id>/")
api.add_resource(TaskList, "/users/<u_id>/tasks/")
api.add_resource(TaskController, "/users/<u_id>/tasks/<t_id>/") | 31.909091 | 75 | 0.766382 |
794681c02ef0cbf91026fe6897a071cb1b82d803 | 19,406 | py | Python | app.py | tahoeivanova/memory_app | 48c86189344c425473e6ea897c43b510cc727487 | [
"MIT"
] | null | null | null | app.py | tahoeivanova/memory_app | 48c86189344c425473e6ea897c43b510cc727487 | [
"MIT"
] | null | null | null | app.py | tahoeivanova/memory_app | 48c86189344c425473e6ea897c43b510cc727487 | [
"MIT"
] | null | null | null | from bottle import Bottle, route, static_file, template, run, request, redirect
from sqlAlchemy_classes import User, NumberResults, CardsResults, PiResults, Card, PathName, PathItems, session, cards_filter
import random
import itertools
from mnemo_functions import pi_reader
import hashlib
import os
application = Bottle()
# globals for User
user_global = ''
# globals for Number Memory
random_numbers_global = [] # a list for computer's numbers
# globals for Card Memory
cards_init_global = [] # an ordered list for select input form
cards_shuffled_global = [] # shuffled cards
# globals for Pi
pi = ''
pi_length = 0
pi_field_length = 0
# globals for paths
path_global = []  # items of the path currently being trained
# Index page
@application.route('/')
@application.route('/main')
@application.route('/index')
def index():
return template('index')
# Sign up
@application.route('/signup')
def signup():
return template('signup')
@application.post('/signup')
def do_signup():
nickname = request.forms.get('nickname')
email = request.forms.get('email')
password = request.forms.getunicode('password')
salt = os.urandom(32)
key = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), salt, 100000)
password = salt + key
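    # Note: the stored value is the 32-byte salt followed by the 32-byte
    # PBKDF2-HMAC-SHA256 key; do_login() below splits the blob the same way.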
new_user = User(nickname=nickname, email=email, password=password)
session.add(new_user)
try:
session.commit()
except:
session.rollback()
raise
finally:
session.close()
return redirect('/')
# return f'<h1> Hello, {nickname}!<br> Your e-mail: {email}<br> Your password: {password} </h1> <br><p>Now you\'re in database!</p>'
# Log in
@application.route('/login')
def login():
return template('login')
@application.post('/login')
def do_login():
nickname = request.forms.get('nickname')
password_to_check = request.forms.getunicode('password') # str - user's input password
global user_global
user_global = nickname
user = session.query(User).filter_by(nickname=nickname).first()
if user:
password_from_storage = user.password # bytes in sql
salt = password_from_storage[:32]
key = password_from_storage[32:]
new_key = hashlib.pbkdf2_hmac('sha256', password_to_check.encode('utf-8'), salt, 100000)
if new_key == key:
# return '<h1>You are logged in! </h1>'
return redirect('/profile')
return '<h1>Invalid user or password!</h1>'
# Profile
@application.route('/profile')
def profile():
global user_global
user = session.query(User).filter_by(nickname=user_global).first()
# registered user
if user:
results_number = session.query(NumberResults).filter_by(user_id=user.id).order_by(NumberResults.attempt_id.desc()).first()
results_cards = session.query(CardsResults).filter_by(user_id=user.id).order_by(CardsResults.attempt_id.desc()).first()
results_pi = session.query(PiResults).filter_by(user_id=user.id).order_by(PiResults.attempt_id.desc()).first()
return template('profile', user=user_global, results_number=results_number, results_cards=results_cards, results_pi=results_pi)
else:
return template('profile', user='you\'re not logged in', results_number='no results', results_cards='no results', results_pi='no results')
# Log Out
@application.route('/logout')
def logout():
global user_global
session.close()
user_global = ''
return redirect('/')
# I - Number Memory
# 1 - Set Options
@application.route('/number_memory')
def set_options():
# clear global variables
global random_numbers_global
random_numbers_global.clear()
return template('number_options')
# 2 - Number Training
@application.post('/number_memory')
def set_options():
# set number of numbers
numbers_all = request.forms.get('numbers_all', type=int)
# set number of digits in a number
digits_in_number = request.forms.get('digits_in_number', type=int)
# time for one number
number_time = request.forms.get('number_time', type=int)
# time for js code
number_time *= 1000
#_________training generator____________
# list of random numbers
global random_numbers_global
# generate random numbers (1 - (0, 9), else - (10, 99))
random_numbers_global = [random.randint(1, 9) for x in range(numbers_all)] if digits_in_number == 1 else [random.randint(10, 99) for x in range(numbers_all)]
# return template with Number Memory Training
return template('number_training', numbers_all=numbers_all, random_numbers_global=random_numbers_global, number_time=number_time)
# 3 - Number Input
@application.post('/number_input')
def digits_answer_input():
# create a necessary amount of input fields
global random_numbers_global
return template('number_input', fields_amount=len(random_numbers_global))
# 4 - Number Results
@application.post('/number_results')
def number_results():
url = '/number_memory' # training one more time
# get the list of user's answers
# global input_numbers_global
user_input_list = []
for i in range(len(random_numbers_global)): # amount of numbers
i +=1 # counter (fields starting count from 1)
i = str(i) # counter to string
# the same counter is in a template, so we can define user's answer
user_input = request.forms.get('number'+ i, type=int) # get the user's input ('i' is a number of field)
user_input_list.append(user_input) # add user's inputs in a global list
# status Win or Loose
results_status = ''
# make the zip lists of results for template
result_lists = itertools.zip_longest(user_input_list, random_numbers_global)
# find a User in SQL
global user_global
user = session.query(User).filter_by(nickname=user_global).first()
# registered user
if user:
# find User's Results Table
# find user's table of results (last result)
results_table = session.query(NumberResults).filter_by(user_id=user.id).order_by(NumberResults.attempt_id.desc()).first()
# Comparing two lists (computer list and user list)
# TRUE
# add +1 to the win_amount in SQL
if user_input_list == random_numbers_global:
results_status = 'WIN'
# if it is a first time, create a table, else - find win_amount and add 1
win_amount_new = 1 if (results_table == None) else results_table.win_amount + 1
loose_results = 0 if (results_table == None) else results_table.loose_amount
results = NumberResults(user_id=user.id, win_amount=win_amount_new, loose_amount=loose_results)
session.add(results)
session.commit()
win_results = results.win_amount
# FALSE
# add +1 to the loose_amount in SQL
else:
results_status = 'LOOSE'
# if it is a first time, create a table, else - find loose_amount and add 1
loose_amount_new = 1 if (results_table == None) else results_table.loose_amount + 1
win_results = 0 if (results_table == None) else results_table.win_amount
results = NumberResults(user_id=user.id, win_amount=win_results, loose_amount=loose_amount_new)
session.add(results)
session.commit()
loose_results = results.loose_amount
return template('results', zipped_list=result_lists, results_status=results_status, win_results=win_results, loose_results=loose_results, url=url)
# not registered user
return template('results', zipped_list=result_lists, results_status='You\'re not logged in', win_results='You\'re not logged in', loose_results='You\'re not logged in', url=url)
# II - Cards Memory
# 1 - Set Options
@application.route('/cards_memory')
def cards_options():
return template('cards_set_options')
@application.post('/cards_memory')
def show_cards():
cards_all = request.forms.get('cards_all', type=int)
cards_time = request.forms.get('cards_time', type=int)
cards_time *= 1000
# filter cards from SQL (36, 52, 53)
cards = cards_filter(cards_all)
# cards_initial - ordered list for answers
cards_initial = cards.copy()
global cards_init_global
cards_init_global = cards_initial
random.shuffle(cards)
global cards_shuffled_global
cards_shuffled_global = cards
return template('cards_training', cards_shuffled=cards_shuffled_global, cards_time=cards_time)
@application.post('/cards_input')
def show_cards_post():
global cards_init_global
return template('cards_input', card_list=cards_init_global)
@application.post('/cards_results')
def cards_input():
url = '/cards_memory'
user_input_list = []
global cards_shuffled_global
for i in range(1, len(cards_shuffled_global)+1):
i = str(i)
user_answer_cards = request.forms.getunicode('card'+ i)
user_answer_cards=str(user_answer_cards)
user_input_list.append(user_answer_cards)
# status Win or Loose
results_status = ''
# make the zip lists of results for template
result_lists = itertools.zip_longest(user_input_list, cards_shuffled_global)
# find a User in SQL
global user_global
user = session.query(User).filter_by(nickname=user_global).first()
# registered user
if user:
# find User's Results Table
# find user's table of results (last result)
results_table = session.query(CardsResults).filter_by(user_id=user.id).order_by(
CardsResults.attempt_id.desc()).first()
# Comparing two lists (computer list and user list)
# TRUE
# add +1 to the win_amount in SQL
if user_input_list == cards_shuffled_global:
results_status = 'WIN'
# if it is a first time, create a table, else - find win_amount and add 1
win_amount_new = 1 if (results_table == None) else results_table.win_amount + 1
loose_results = 0 if (results_table == None) else results_table.loose_amount
results = CardsResults(user_id=user.id, win_amount=win_amount_new, loose_amount=loose_results)
session.add(results)
session.commit()
win_results = results.win_amount
# FALSE
# add +1 to the loose_amount in SQL
else:
results_status = 'LOOSE'
# if it is a first time, create a table, else - find loose_amount and add 1
loose_amount_new = 1 if (results_table == None) else results_table.loose_amount + 1
win_results = 0 if (results_table == None) else results_table.win_amount
results = CardsResults(user_id=user.id, win_amount=win_results, loose_amount=loose_amount_new)
session.add(results)
session.commit()
loose_results = results.loose_amount
return template('results', zipped_list=result_lists, results_status=results_status,
win_results=win_results, loose_results=loose_results, url=url)
# not registered user
return template('results', zipped_list=result_lists, results_status='You\'re not logged in',
win_results='You\'re not logged in', loose_results='You\'re not logged in', url=url)
# III - Pi
# get pi number
@application.route('/pi/<n:int>')
def pi(n):
pi = pi_reader(n)
return f'<h1>3.{pi}</h1>'
# Pi Index
@application.route('/pi')
def pi_page():
return template('pi_main')
# Options
@application.post('/pi')
def pi_page():
global pi_length
global pi_field_length
pi_length = request.forms.get('pi_length', type=int) # digits after comma
pi_field_length = request.forms.get('pi_field_length', type=int) # amount of digits in one field
return template('pi_input', pi_length=pi_length, pi_field_length=pi_field_length)
@application.post('/pi_results')
def results_pi():
url = '/pi'
global pi_length
global pi_field_length
if pi_length%pi_field_length==0:
count = pi_length//pi_field_length
else:
count = pi_length // pi_field_length + 1
input_pi = ''
for i in range(1, count+1):
i = str(i)
        pi_answer = request.forms.get('pi_answer'+i) # the user's chunk of Pi digits for this input field
input_pi+=pi_answer
print(input_pi)
# Define Pi global
global pi
pi = pi_reader(pi_length)
# pi = pi[2:] # pi without '3.'
input_pi = list(input_pi)
pi = list(pi)
result_lists = itertools.zip_longest(input_pi,pi)
results_status = ''
# find a User in SQL
# global user_global
try:
user = session.query(User).filter_by(nickname=user_global).first()
except:
user = ''
# registered user
# if user:
# find User's Results Table
# find user's table of results (last result)
# results_table = session.query(PiResults).filter_by(user_id=user.id).order_by(
# PiResults.attempt_id.desc()).first()
# Comparing two lists (computer list and user list)
# RESULTS
if input_pi == pi:
results_status = "WIN"
# if it is a first time, create a table, else - find win_amount and add 1
pi_signs_amount = len(pi)
if user:
results = PiResults(user_id=user.id, pi_signs_amount=pi_signs_amount)
session.add(results)
session.commit()
results = results.pi_signs_amount
else:
results_status = "LOOSE"
pi_signs_amount = 'Mistakes'
return template('results', zipped_list=result_lists, results_status=results_status,
                    win_results=f'You know {pi_signs_amount} digits of Pi after the decimal point', loose_results='Not counted',
url=url)
#_______
# IV - 1 - Path
# list of paths
@application.route('/path')
def path_names():
global user_global
user = session.query(User).filter_by(nickname=user_global).first()
if user:
path_names_all = session.query(PathName).filter_by(user_id=user.id).all()
return template('path_names', path_names=path_names_all)
else:
return redirect('/login')
# add new path_name
@application.post('/path')
def add_path_name():
global user_global
user = session.query(User).filter_by(nickname=user_global).first()
path_name = request.forms.getunicode('path_name')
path_name_new = PathName(user_id=user.id, name=path_name)
session.add(path_name_new)
session.commit()
return redirect('/path')
# delete path_name
@application.route('/path_delete/<path_id:int>')
def delete_path_name(path_id):
global user_global
user = session.query(User).filter_by(nickname=user_global).first()
path = session.query(PathName).filter_by(user_id=user.id, id=path_id).first()
if request.GET.delete:
        session.delete(path)
        session.commit()
        return redirect('/path')
return template('path_delete', path=path)
# edit path_name
@application.route('/path_name_edit/<path_id:int>', method="GET")
def edit_path_name(path_id):
global user_global
user = session.query(User).filter_by(nickname=user_global).first()
old_path_name = session.query(PathName).filter_by(user_id=user.id, id=path_id).first()
if request.GET.edit:
path_name_edited = request.GET.path_name.strip()
        old_path_name.name = path_name_edited
        session.commit()
        return redirect('/path')
else:
return template('path_name_edit', old_path_name=old_path_name)
# IV - 2 - Path Items
# add, edit, delete path_items
# list of path items
@application.route('/path/<path_id>')
def path_item(path_id):
global user_global
user = session.query(User).filter_by(nickname=user_global).first()
path_items = session.query(PathItems).filter_by(user_id=user.id, path_id=path_id).all()
if path_items:
return template('path_items', path=path_items, path_id=path_id)
else:
return template('path_no_items', path_id=path_id)
# add path item
@application.post('/path/<path_id>')
def add_path_item(path_id):
item = request.forms.getunicode('item')
global user_global
user = session.query(User).filter_by(nickname=user_global).first()
# path = session.query(PathItems).filter_by(user_id=user.id, path_id=path_id).first()
new_item = PathItems(user_id=user.id, path_id=path_id, name=item)
session.add(new_item)
session.commit()
return redirect(f'/path/{path_id}')
# delete path item
@application.route('/path_item_delete/<path_id:int>/<item_id:int>')
def delete_path_item(path_id, item_id):
global user_global
user = session.query(User).filter_by(nickname=user_global).first()
path_item = session.query(PathItems).filter_by(user_id=user.id, path_id=path_id, id=item_id).first()
if request.GET.delete:
        session.delete(path_item)
        session.commit()
        return redirect(f'/path/{path_id}')
return template('path_item_delete', path_item=path_item, path_id=path_id, item_id=item_id)
# edit path item
@application.route('/path_item_edit/<path_id:int>/<item_id:int>', method="GET")
def edit_path_item(path_id, item_id):
global user_global
user = session.query(User).filter_by(nickname=user_global).first()
old_item_name = session.query(PathItems).filter_by(user_id=user.id, path_id=path_id, id=item_id).first()
if request.GET.edit:
item_name_edited = request.GET.item_name.strip()
        old_item_name.name = item_name_edited
        session.commit()
        return redirect(f'/path/{path_id}')
else:
return template('path_item_edit', old_item_name=old_item_name, path_id=path_id, item_id=item_id)
# IV - 3 - Path Training
@application.route('/path_training/<path_id:int>', method="GET")
def path_training(path_id):
global user_global
user = session.query(User).filter_by(nickname=user_global).first()
path_items = session.query(PathItems).filter_by(user_id=user.id, path_id=path_id).all()
path_item_list = []
for item in path_items:
item = str(item)
path_item_list.append(item)
global path_global
path_global = path_item_list
return template('path_training', path_all = len(path_global), path_global=path_global, path_time=5000)
@application.post('/path_input')
def path_input():
global path_global
return template('path_input', fields_amount=len(path_global))
@application.post('/path_results')
def path_results():
url = '/path'
user_input_list = []
global path_global
for i in range(1, len(path_global)+1):
i = str(i)
user_input = request.forms.getunicode('number' + i) # get the user's input ('i' is a number of field)
user_input_list.append(user_input) # add user's inputs in a global list
# status Win or Loose
results_status = ''
# make the zip lists of results for template
result_lists = itertools.zip_longest(user_input_list, path_global)
# registered user
if user_input_list == path_global:
results_status = 'WIN'
# FALSE
# add +1 to the loose_amount in SQL
else:
results_status = 'LOOSE'
return template('results', zipped_list=result_lists, results_status=results_status, win_results='no result', loose_results='no result', url=url)
# V - Theory
@application.route('/theory')
def theory():
return template('articles')
# static files
@application.route('/static/<filename:path>')
def server_static(filename):
return static_file(filename, root='./static/')
if __name__ == '__main__':
application.run(host='0.0.0.0', port='8080')
| 34.105448 | 181 | 0.689941 |
794681cb7fe5ff828e5d95dd594e5eed8bfe10ca | 28,797 | py | Python | vut/lib/python3.8/site-packages/pipenv/patched/yaml2/constructor.py | dan-mutua/djangowk1 | 1e5dcb6443ef21451e21845ec639198719e11b10 | [
"MIT"
] | 6,263 | 2017-01-20T17:41:36.000Z | 2022-02-15T20:48:57.000Z | vut/lib/python3.8/site-packages/pipenv/patched/yaml2/constructor.py | dan-mutua/djangowk1 | 1e5dcb6443ef21451e21845ec639198719e11b10 | [
"MIT"
] | 1,100 | 2017-01-20T19:41:52.000Z | 2017-12-06T09:15:13.000Z | vut/lib/python3.8/site-packages/pipenv/patched/yaml2/constructor.py | dan-mutua/djangowk1 | 1e5dcb6443ef21451e21845ec639198719e11b10 | [
"MIT"
] | 366 | 2017-01-21T10:06:52.000Z | 2021-11-25T17:09:19.000Z |
__all__ = [
'BaseConstructor',
'SafeConstructor',
'FullConstructor',
'UnsafeConstructor',
'Constructor',
'ConstructorError'
]
from error import *
from nodes import *
import datetime
import binascii, re, sys, types
class ConstructorError(MarkedYAMLError):
pass
class timezone(datetime.tzinfo):
def __init__(self, offset):
self._offset = offset
seconds = abs(offset).total_seconds()
self._name = 'UTC%s%02d:%02d' % (
'-' if offset.days < 0 else '+',
seconds // 3600,
seconds % 3600 // 60
)
def tzname(self, dt=None):
return self._name
def utcoffset(self, dt=None):
return self._offset
def dst(self, dt=None):
return datetime.timedelta(0)
__repr__ = __str__ = tzname
class BaseConstructor(object):
yaml_constructors = {}
yaml_multi_constructors = {}
def __init__(self):
self.constructed_objects = {}
self.recursive_objects = {}
self.state_generators = []
self.deep_construct = False
def check_data(self):
# If there are more documents available?
return self.check_node()
def check_state_key(self, key):
"""Block special attributes/methods from being set in a newly created
object, to prevent user-controlled methods from being called during
deserialization"""
if self.get_state_keys_blacklist_regexp().match(key):
raise ConstructorError(None, None,
"blacklisted key '%s' in instance state found" % (key,), None)
def get_data(self):
# Construct and return the next document.
if self.check_node():
return self.construct_document(self.get_node())
def get_single_data(self):
# Ensure that the stream contains a single document and construct it.
node = self.get_single_node()
if node is not None:
return self.construct_document(node)
return None
def construct_document(self, node):
data = self.construct_object(node)
while self.state_generators:
state_generators = self.state_generators
self.state_generators = []
for generator in state_generators:
for dummy in generator:
pass
self.constructed_objects = {}
self.recursive_objects = {}
self.deep_construct = False
return data
def construct_object(self, node, deep=False):
if node in self.constructed_objects:
return self.constructed_objects[node]
if deep:
old_deep = self.deep_construct
self.deep_construct = True
if node in self.recursive_objects:
raise ConstructorError(None, None,
"found unconstructable recursive node", node.start_mark)
self.recursive_objects[node] = None
constructor = None
tag_suffix = None
if node.tag in self.yaml_constructors:
constructor = self.yaml_constructors[node.tag]
else:
for tag_prefix in self.yaml_multi_constructors:
if tag_prefix is not None and node.tag.startswith(tag_prefix):
tag_suffix = node.tag[len(tag_prefix):]
constructor = self.yaml_multi_constructors[tag_prefix]
break
else:
if None in self.yaml_multi_constructors:
tag_suffix = node.tag
constructor = self.yaml_multi_constructors[None]
elif None in self.yaml_constructors:
constructor = self.yaml_constructors[None]
elif isinstance(node, ScalarNode):
constructor = self.__class__.construct_scalar
elif isinstance(node, SequenceNode):
constructor = self.__class__.construct_sequence
elif isinstance(node, MappingNode):
constructor = self.__class__.construct_mapping
if tag_suffix is None:
data = constructor(self, node)
else:
data = constructor(self, tag_suffix, node)
if isinstance(data, types.GeneratorType):
generator = data
data = generator.next()
if self.deep_construct:
for dummy in generator:
pass
else:
self.state_generators.append(generator)
self.constructed_objects[node] = data
del self.recursive_objects[node]
if deep:
self.deep_construct = old_deep
return data
def construct_scalar(self, node):
if not isinstance(node, ScalarNode):
raise ConstructorError(None, None,
"expected a scalar node, but found %s" % node.id,
node.start_mark)
return node.value
def construct_sequence(self, node, deep=False):
if not isinstance(node, SequenceNode):
raise ConstructorError(None, None,
"expected a sequence node, but found %s" % node.id,
node.start_mark)
return [self.construct_object(child, deep=deep)
for child in node.value]
def construct_mapping(self, node, deep=False):
if not isinstance(node, MappingNode):
raise ConstructorError(None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
mapping = {}
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError, exc:
raise ConstructorError("while constructing a mapping", node.start_mark,
"found unacceptable key (%s)" % exc, key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
def construct_pairs(self, node, deep=False):
if not isinstance(node, MappingNode):
raise ConstructorError(None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
pairs = []
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
value = self.construct_object(value_node, deep=deep)
pairs.append((key, value))
return pairs
def add_constructor(cls, tag, constructor):
if not 'yaml_constructors' in cls.__dict__:
cls.yaml_constructors = cls.yaml_constructors.copy()
cls.yaml_constructors[tag] = constructor
add_constructor = classmethod(add_constructor)
def add_multi_constructor(cls, tag_prefix, multi_constructor):
if not 'yaml_multi_constructors' in cls.__dict__:
cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
cls.yaml_multi_constructors[tag_prefix] = multi_constructor
add_multi_constructor = classmethod(add_multi_constructor)
class SafeConstructor(BaseConstructor):
def construct_scalar(self, node):
if isinstance(node, MappingNode):
for key_node, value_node in node.value:
if key_node.tag == u'tag:yaml.org,2002:value':
return self.construct_scalar(value_node)
return BaseConstructor.construct_scalar(self, node)
def flatten_mapping(self, node):
merge = []
index = 0
while index < len(node.value):
key_node, value_node = node.value[index]
if key_node.tag == u'tag:yaml.org,2002:merge':
del node.value[index]
if isinstance(value_node, MappingNode):
self.flatten_mapping(value_node)
merge.extend(value_node.value)
elif isinstance(value_node, SequenceNode):
submerge = []
for subnode in value_node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError("while constructing a mapping",
node.start_mark,
"expected a mapping for merging, but found %s"
% subnode.id, subnode.start_mark)
self.flatten_mapping(subnode)
submerge.append(subnode.value)
submerge.reverse()
for value in submerge:
merge.extend(value)
else:
raise ConstructorError("while constructing a mapping", node.start_mark,
"expected a mapping or list of mappings for merging, but found %s"
% value_node.id, value_node.start_mark)
elif key_node.tag == u'tag:yaml.org,2002:value':
key_node.tag = u'tag:yaml.org,2002:str'
index += 1
else:
index += 1
if merge:
node.value = merge + node.value
def construct_mapping(self, node, deep=False):
if isinstance(node, MappingNode):
self.flatten_mapping(node)
return BaseConstructor.construct_mapping(self, node, deep=deep)
def construct_yaml_null(self, node):
self.construct_scalar(node)
return None
bool_values = {
u'yes': True,
u'no': False,
u'true': True,
u'false': False,
u'on': True,
u'off': False,
}
def construct_yaml_bool(self, node):
value = self.construct_scalar(node)
return self.bool_values[value.lower()]
def construct_yaml_int(self, node):
value = str(self.construct_scalar(node))
value = value.replace('_', '')
sign = +1
if value[0] == '-':
sign = -1
if value[0] in '+-':
value = value[1:]
if value == '0':
return 0
elif value.startswith('0b'):
return sign*int(value[2:], 2)
elif value.startswith('0x'):
return sign*int(value[2:], 16)
elif value[0] == '0':
return sign*int(value, 8)
elif ':' in value:
digits = [int(part) for part in value.split(':')]
digits.reverse()
base = 1
value = 0
for digit in digits:
value += digit*base
base *= 60
return sign*value
else:
return sign*int(value)
inf_value = 1e300
while inf_value != inf_value*inf_value:
inf_value *= inf_value
nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
def construct_yaml_float(self, node):
value = str(self.construct_scalar(node))
value = value.replace('_', '').lower()
sign = +1
if value[0] == '-':
sign = -1
if value[0] in '+-':
value = value[1:]
if value == '.inf':
return sign*self.inf_value
elif value == '.nan':
return self.nan_value
elif ':' in value:
digits = [float(part) for part in value.split(':')]
digits.reverse()
base = 1
value = 0.0
for digit in digits:
value += digit*base
base *= 60
return sign*value
else:
return sign*float(value)
def construct_yaml_binary(self, node):
value = self.construct_scalar(node)
try:
return str(value).decode('base64')
except (binascii.Error, UnicodeEncodeError), exc:
raise ConstructorError(None, None,
"failed to decode base64 data: %s" % exc, node.start_mark)
timestamp_regexp = re.compile(
ur'''^(?P<year>[0-9][0-9][0-9][0-9])
-(?P<month>[0-9][0-9]?)
-(?P<day>[0-9][0-9]?)
(?:(?:[Tt]|[ \t]+)
(?P<hour>[0-9][0-9]?)
:(?P<minute>[0-9][0-9])
:(?P<second>[0-9][0-9])
(?:\.(?P<fraction>[0-9]*))?
(?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
(?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
def construct_yaml_timestamp(self, node):
value = self.construct_scalar(node)
match = self.timestamp_regexp.match(node.value)
values = match.groupdict()
year = int(values['year'])
month = int(values['month'])
day = int(values['day'])
if not values['hour']:
return datetime.date(year, month, day)
hour = int(values['hour'])
minute = int(values['minute'])
second = int(values['second'])
fraction = 0
tzinfo = None
if values['fraction']:
fraction = values['fraction'][:6]
while len(fraction) < 6:
fraction += '0'
fraction = int(fraction)
if values['tz_sign']:
tz_hour = int(values['tz_hour'])
tz_minute = int(values['tz_minute'] or 0)
delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
if values['tz_sign'] == '-':
delta = -delta
tzinfo = timezone(delta)
elif values['tz']:
tzinfo = timezone(datetime.timedelta(0))
return datetime.datetime(year, month, day, hour, minute, second, fraction,
tzinfo=tzinfo)
def construct_yaml_omap(self, node):
# Note: we do not check for duplicate keys, because it's too
# CPU-expensive.
omap = []
yield omap
if not isinstance(node, SequenceNode):
raise ConstructorError("while constructing an ordered map", node.start_mark,
"expected a sequence, but found %s" % node.id, node.start_mark)
for subnode in node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError("while constructing an ordered map", node.start_mark,
"expected a mapping of length 1, but found %s" % subnode.id,
subnode.start_mark)
if len(subnode.value) != 1:
raise ConstructorError("while constructing an ordered map", node.start_mark,
"expected a single mapping item, but found %d items" % len(subnode.value),
subnode.start_mark)
key_node, value_node = subnode.value[0]
key = self.construct_object(key_node)
value = self.construct_object(value_node)
omap.append((key, value))
def construct_yaml_pairs(self, node):
# Note: the same code as `construct_yaml_omap`.
pairs = []
yield pairs
if not isinstance(node, SequenceNode):
raise ConstructorError("while constructing pairs", node.start_mark,
"expected a sequence, but found %s" % node.id, node.start_mark)
for subnode in node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError("while constructing pairs", node.start_mark,
"expected a mapping of length 1, but found %s" % subnode.id,
subnode.start_mark)
if len(subnode.value) != 1:
raise ConstructorError("while constructing pairs", node.start_mark,
"expected a single mapping item, but found %d items" % len(subnode.value),
subnode.start_mark)
key_node, value_node = subnode.value[0]
key = self.construct_object(key_node)
value = self.construct_object(value_node)
pairs.append((key, value))
def construct_yaml_set(self, node):
data = set()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_yaml_str(self, node):
value = self.construct_scalar(node)
try:
return value.encode('ascii')
except UnicodeEncodeError:
return value
def construct_yaml_seq(self, node):
data = []
yield data
data.extend(self.construct_sequence(node))
def construct_yaml_map(self, node):
data = {}
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_yaml_object(self, node, cls):
data = cls.__new__(cls)
yield data
if hasattr(data, '__setstate__'):
state = self.construct_mapping(node, deep=True)
data.__setstate__(state)
else:
state = self.construct_mapping(node)
data.__dict__.update(state)
def construct_undefined(self, node):
raise ConstructorError(None, None,
"could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
node.start_mark)
SafeConstructor.add_constructor(
u'tag:yaml.org,2002:null',
SafeConstructor.construct_yaml_null)
SafeConstructor.add_constructor(
u'tag:yaml.org,2002:bool',
SafeConstructor.construct_yaml_bool)
SafeConstructor.add_constructor(
u'tag:yaml.org,2002:int',
SafeConstructor.construct_yaml_int)
SafeConstructor.add_constructor(
u'tag:yaml.org,2002:float',
SafeConstructor.construct_yaml_float)
SafeConstructor.add_constructor(
u'tag:yaml.org,2002:binary',
SafeConstructor.construct_yaml_binary)
SafeConstructor.add_constructor(
u'tag:yaml.org,2002:timestamp',
SafeConstructor.construct_yaml_timestamp)
SafeConstructor.add_constructor(
u'tag:yaml.org,2002:omap',
SafeConstructor.construct_yaml_omap)
SafeConstructor.add_constructor(
u'tag:yaml.org,2002:pairs',
SafeConstructor.construct_yaml_pairs)
SafeConstructor.add_constructor(
u'tag:yaml.org,2002:set',
SafeConstructor.construct_yaml_set)
SafeConstructor.add_constructor(
u'tag:yaml.org,2002:str',
SafeConstructor.construct_yaml_str)
SafeConstructor.add_constructor(
u'tag:yaml.org,2002:seq',
SafeConstructor.construct_yaml_seq)
SafeConstructor.add_constructor(
u'tag:yaml.org,2002:map',
SafeConstructor.construct_yaml_map)
SafeConstructor.add_constructor(None,
SafeConstructor.construct_undefined)
class FullConstructor(SafeConstructor):
# 'extend' is blacklisted because it is used by
    # construct_python_object_apply to add `listitems` to a newly generated
    # Python instance
def get_state_keys_blacklist(self):
return ['^extend$', '^__.*__$']
def get_state_keys_blacklist_regexp(self):
if not hasattr(self, 'state_keys_blacklist_regexp'):
self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')')
return self.state_keys_blacklist_regexp
def construct_python_str(self, node):
return self.construct_scalar(node).encode('utf-8')
def construct_python_unicode(self, node):
return self.construct_scalar(node)
def construct_python_long(self, node):
return long(self.construct_yaml_int(node))
def construct_python_complex(self, node):
return complex(self.construct_scalar(node))
def construct_python_tuple(self, node):
return tuple(self.construct_sequence(node))
def find_python_module(self, name, mark, unsafe=False):
if not name:
raise ConstructorError("while constructing a Python module", mark,
"expected non-empty name appended to the tag", mark)
if unsafe:
try:
__import__(name)
except ImportError, exc:
raise ConstructorError("while constructing a Python module", mark,
"cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
if name not in sys.modules:
raise ConstructorError("while constructing a Python module", mark,
"module %r is not imported" % name.encode('utf-8'), mark)
return sys.modules[name]
def find_python_name(self, name, mark, unsafe=False):
if not name:
raise ConstructorError("while constructing a Python object", mark,
"expected non-empty name appended to the tag", mark)
if u'.' in name:
module_name, object_name = name.rsplit('.', 1)
else:
module_name = '__builtin__'
object_name = name
if unsafe:
try:
__import__(module_name)
except ImportError, exc:
raise ConstructorError("while constructing a Python object", mark,
"cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
if module_name not in sys.modules:
raise ConstructorError("while constructing a Python object", mark,
"module %r is not imported" % module_name.encode('utf-8'), mark)
module = sys.modules[module_name]
if not hasattr(module, object_name):
raise ConstructorError("while constructing a Python object", mark,
"cannot find %r in the module %r" % (object_name.encode('utf-8'),
module.__name__), mark)
return getattr(module, object_name)
def construct_python_name(self, suffix, node):
value = self.construct_scalar(node)
if value:
raise ConstructorError("while constructing a Python name", node.start_mark,
"expected the empty value, but found %r" % value.encode('utf-8'),
node.start_mark)
return self.find_python_name(suffix, node.start_mark)
def construct_python_module(self, suffix, node):
value = self.construct_scalar(node)
if value:
raise ConstructorError("while constructing a Python module", node.start_mark,
"expected the empty value, but found %r" % value.encode('utf-8'),
node.start_mark)
return self.find_python_module(suffix, node.start_mark)
class classobj: pass
def make_python_instance(self, suffix, node,
args=None, kwds=None, newobj=False, unsafe=False):
if not args:
args = []
if not kwds:
kwds = {}
cls = self.find_python_name(suffix, node.start_mark)
if not (unsafe or isinstance(cls, type) or isinstance(cls, type(self.classobj))):
raise ConstructorError("while constructing a Python instance", node.start_mark,
"expected a class, but found %r" % type(cls),
node.start_mark)
if newobj and isinstance(cls, type(self.classobj)) \
and not args and not kwds:
instance = self.classobj()
instance.__class__ = cls
return instance
elif newobj and isinstance(cls, type):
return cls.__new__(cls, *args, **kwds)
else:
return cls(*args, **kwds)
def set_python_instance_state(self, instance, state, unsafe=False):
if hasattr(instance, '__setstate__'):
instance.__setstate__(state)
else:
slotstate = {}
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if hasattr(instance, '__dict__'):
if not unsafe and state:
for key in state.keys():
self.check_state_key(key)
instance.__dict__.update(state)
elif state:
slotstate.update(state)
for key, value in slotstate.items():
if not unsafe:
self.check_state_key(key)
setattr(instance, key, value)
def construct_python_object(self, suffix, node):
# Format:
# !!python/object:module.name { ... state ... }
instance = self.make_python_instance(suffix, node, newobj=True)
yield instance
deep = hasattr(instance, '__setstate__')
state = self.construct_mapping(node, deep=deep)
self.set_python_instance_state(instance, state)
def construct_python_object_apply(self, suffix, node, newobj=False):
# Format:
# !!python/object/apply # (or !!python/object/new)
# args: [ ... arguments ... ]
# kwds: { ... keywords ... }
# state: ... state ...
# listitems: [ ... listitems ... ]
# dictitems: { ... dictitems ... }
# or short format:
# !!python/object/apply [ ... arguments ... ]
# The difference between !!python/object/apply and !!python/object/new
# is how an object is created, check make_python_instance for details.
if isinstance(node, SequenceNode):
args = self.construct_sequence(node, deep=True)
kwds = {}
state = {}
listitems = []
dictitems = {}
else:
value = self.construct_mapping(node, deep=True)
args = value.get('args', [])
kwds = value.get('kwds', {})
state = value.get('state', {})
listitems = value.get('listitems', [])
dictitems = value.get('dictitems', {})
instance = self.make_python_instance(suffix, node, args, kwds, newobj)
if state:
self.set_python_instance_state(instance, state)
if listitems:
instance.extend(listitems)
if dictitems:
for key in dictitems:
instance[key] = dictitems[key]
return instance
def construct_python_object_new(self, suffix, node):
return self.construct_python_object_apply(suffix, node, newobj=True)
FullConstructor.add_constructor(
u'tag:yaml.org,2002:python/none',
FullConstructor.construct_yaml_null)
FullConstructor.add_constructor(
u'tag:yaml.org,2002:python/bool',
FullConstructor.construct_yaml_bool)
FullConstructor.add_constructor(
u'tag:yaml.org,2002:python/str',
FullConstructor.construct_python_str)
FullConstructor.add_constructor(
u'tag:yaml.org,2002:python/unicode',
FullConstructor.construct_python_unicode)
FullConstructor.add_constructor(
u'tag:yaml.org,2002:python/int',
FullConstructor.construct_yaml_int)
FullConstructor.add_constructor(
u'tag:yaml.org,2002:python/long',
FullConstructor.construct_python_long)
FullConstructor.add_constructor(
u'tag:yaml.org,2002:python/float',
FullConstructor.construct_yaml_float)
FullConstructor.add_constructor(
u'tag:yaml.org,2002:python/complex',
FullConstructor.construct_python_complex)
FullConstructor.add_constructor(
u'tag:yaml.org,2002:python/list',
FullConstructor.construct_yaml_seq)
FullConstructor.add_constructor(
u'tag:yaml.org,2002:python/tuple',
FullConstructor.construct_python_tuple)
FullConstructor.add_constructor(
u'tag:yaml.org,2002:python/dict',
FullConstructor.construct_yaml_map)
FullConstructor.add_multi_constructor(
u'tag:yaml.org,2002:python/name:',
FullConstructor.construct_python_name)
FullConstructor.add_multi_constructor(
u'tag:yaml.org,2002:python/module:',
FullConstructor.construct_python_module)
FullConstructor.add_multi_constructor(
u'tag:yaml.org,2002:python/object:',
FullConstructor.construct_python_object)
FullConstructor.add_multi_constructor(
u'tag:yaml.org,2002:python/object/new:',
FullConstructor.construct_python_object_new)
class UnsafeConstructor(FullConstructor):
def find_python_module(self, name, mark):
return super(UnsafeConstructor, self).find_python_module(name, mark, unsafe=True)
def find_python_name(self, name, mark):
return super(UnsafeConstructor, self).find_python_name(name, mark, unsafe=True)
def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
return super(UnsafeConstructor, self).make_python_instance(
suffix, node, args, kwds, newobj, unsafe=True)
def set_python_instance_state(self, instance, state):
return super(UnsafeConstructor, self).set_python_instance_state(
instance, state, unsafe=True)
UnsafeConstructor.add_multi_constructor(
u'tag:yaml.org,2002:python/object/apply:',
UnsafeConstructor.construct_python_object_apply)
# Constructor is same as UnsafeConstructor. Need to leave this in place in case
# people have extended it directly.
class Constructor(UnsafeConstructor):
pass
| 37.840999 | 112 | 0.597979 |
794681cc6b1a39751766a5a689ac39aeb7ad93ab | 17,696 | py | Python | imbalanced_ensemble/ensemble/_bagging.py | ZhiningLiu1998/imbalanced-ensemble | 26670c8a6b7bab26ae1e18cba3174a9d9038a680 | [
"MIT"
] | 87 | 2021-05-19T08:29:26.000Z | 2022-03-30T23:59:05.000Z | imbalanced_ensemble/ensemble/_bagging.py | ZhiningLiu1998/imbalanced-ensemble | 26670c8a6b7bab26ae1e18cba3174a9d9038a680 | [
"MIT"
] | 8 | 2021-05-28T10:27:28.000Z | 2022-01-11T11:21:03.000Z | imbalanced_ensemble/ensemble/_bagging.py | ZhiningLiu1998/imbalanced-ensemble | 26670c8a6b7bab26ae1e18cba3174a9d9038a680 | [
"MIT"
] | 18 | 2021-05-19T08:30:29.000Z | 2022-03-28T08:30:10.000Z | """Base classes for all bagging-like methods in imbalanced_ensemble.
ResampleBaggingClassifier Base class for all resampling +
bagging imbalanced ensemble classifier.
"""
# Authors: Zhining Liu <[email protected]>
# License: MIT
# %%
from abc import ABCMeta, abstractmethod
import numpy as np
import numbers
import itertools
from warnings import warn
from joblib import Parallel
from sklearn.base import clone
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble._base import _partition_estimators
from sklearn.pipeline import Pipeline as skPipeline
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_random_state
from sklearn.utils.validation import _check_sample_weight, has_fit_parameter
from sklearn.utils.fixes import delayed
from sklearn.utils.random import sample_without_replacement
from .base import ImbalancedEnsembleClassifierMixin, MAX_INT
from ..pipeline import Pipeline
from ..utils._validation_data import check_eval_datasets
from ..utils._validation_param import (check_train_verbose,
check_eval_metrics,
check_type)
from ..utils._validation import (_deprecate_positional_args,
check_sampling_strategy,
check_target_type)
from ..utils._docstring import (FuncSubstitution,
FuncGlossarySubstitution,
_get_parameter_docstring)
# # For local test
# import sys
# sys.path.append("..")
# from ensemble.base import ImbalancedEnsembleClassifierMixin, MAX_INT
# from pipeline import Pipeline
# from utils._validation_data import check_eval_datasets
# from utils._validation_param import (check_train_verbose,
# check_eval_metrics,
# check_type)
# from utils._validation import (_deprecate_positional_args,
# check_sampling_strategy,
# check_target_type)
# from utils._docstring import (FuncSubstitution,
# _get_parameter_docstring)
def _generate_indices(random_state, bootstrap, n_population, n_samples):
"""Draw randomly sampled indices."""
# Draw sample indices
if bootstrap:
indices = random_state.randint(0, n_population, n_samples)
else:
indices = sample_without_replacement(n_population, n_samples,
random_state=random_state)
return indices
def _generate_bagging_indices(random_state, bootstrap_features,
bootstrap_samples, n_features, n_samples,
max_features, max_samples):
"""Randomly draw feature and sample indices."""
# Get valid random state
random_state = check_random_state(random_state)
# Draw indices
feature_indices = _generate_indices(random_state, bootstrap_features,
n_features, max_features)
sample_indices = _generate_indices(random_state, bootstrap_samples,
n_samples, max_samples)
return feature_indices, sample_indices
def _parallel_build_estimators(n_estimators, ensemble, X, y, sample_weight,
seeds, total_n_estimators, verbose):
"""Private function used to build a batch of estimators within a job."""
# Retrieve settings
n_samples, n_features = X.shape
max_features = ensemble._max_features
max_samples = ensemble._max_samples
bootstrap = ensemble.bootstrap
bootstrap_features = ensemble.bootstrap_features
# Check if the base_estimator supports sample_weight
base_estimator_ = ensemble.base_estimator_
while (isinstance(base_estimator_, skPipeline)): # for Pipelines
base_estimator_ = base_estimator_._final_estimator
support_sample_weight = has_fit_parameter(base_estimator_, "sample_weight")
if not support_sample_weight and sample_weight is not None:
raise ValueError("The base estimator doesn't support sample weight")
# Build estimators
estimators = []
estimators_features = []
estimators_n_training_samples = []
for i in range(n_estimators):
if verbose > 1:
print("Building estimator %d of %d for this parallel run "
"(total %d)..." % (i + 1, n_estimators, total_n_estimators))
random_state = seeds[i]
estimator = ensemble._make_estimator(append=False,
random_state=random_state)
# Draw random feature, sample indices
features, indices = _generate_bagging_indices(random_state,
bootstrap_features,
bootstrap, n_features,
n_samples, max_features,
max_samples)
# Draw samples, using sample weights, and then fit
if support_sample_weight:
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,))
else:
curr_sample_weight = sample_weight.copy()
estimator.fit((X[indices])[:, features], y[indices],
sample_weight=curr_sample_weight[indices])
else:
estimator.fit((X[indices])[:, features], y[indices])
if hasattr(estimator, 'n_training_samples_'):
n_training_samples = getattr(estimator, 'n_training_samples_')
else: n_training_samples = len(indices)
estimators.append(estimator)
estimators_features.append(features)
estimators_n_training_samples.append(n_training_samples)
return estimators, estimators_features, estimators_n_training_samples
_super = BaggingClassifier
class ResampleBaggingClassifier(ImbalancedEnsembleClassifierMixin,
BaggingClassifier, metaclass=ABCMeta):
"""Base class for all resampling + bagging imbalanced ensemble classifier.
    Warning: This class should not be used directly. Use the derived classes
instead.
"""
_ensemble_type = 'bagging'
_solution_type = 'resampling'
_training_type = 'parallel'
_properties = {
'ensemble_type': _ensemble_type,
'solution_type': _solution_type,
'training_type': _training_type,
}
@_deprecate_positional_args
def __init__(self,
base_estimator=None,
n_estimators=10,
*,
base_sampler,
sampling_type,
sampling_strategy="auto",
max_samples=1.0,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
oob_score=False,
warm_start=False,
n_jobs=None,
random_state=None,
verbose=0,):
self.sampling_strategy = sampling_strategy
self._sampling_type = sampling_type
self.base_sampler = base_sampler
super().__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
bootstrap=bootstrap,
bootstrap_features=bootstrap_features,
oob_score=oob_score,
warm_start=warm_start,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
)
def _validate_y(self, y):
"""Validate the label vector."""
y_encoded = super()._validate_y(y)
if (
isinstance(self.sampling_strategy, dict)
and self.base_sampler_._sampling_type != "bypass"
):
self._sampling_strategy = {
np.where(self.classes_ == key)[0][0]: value
for key, value in check_sampling_strategy(
self.sampling_strategy,
y,
self.base_sampler_._sampling_type,
).items()
}
else:
self._sampling_strategy = self.sampling_strategy
return y_encoded
def _validate_estimator(self, default=DecisionTreeClassifier()):
"""Check the estimator and the n_estimator attribute, set the
`base_estimator_` attribute."""
if not isinstance(self.n_estimators, (numbers.Integral, np.integer)):
raise ValueError(
f"n_estimators must be an integer, " f"got {type(self.n_estimators)}."
)
if self.n_estimators <= 0:
raise ValueError(
f"n_estimators must be greater than zero, " f"got {self.n_estimators}."
)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = clone(default)
# validate sampler and sampler_kwargs
# validated sampler stored in self.base_sampler_
try:
self.base_sampler_ = clone(self.base_sampler)
except Exception as e:
e_args = list(e.args)
e_args[0] = "Exception occurs when trying to validate" + \
" base_sampler: " + e_args[0]
e.args = tuple(e_args)
raise e
if self.base_sampler_._sampling_type != "bypass":
self.base_sampler_.set_params(sampling_strategy=self._sampling_strategy)
self.base_sampler_.set_params(**self.sampler_kwargs_)
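        # Chain the validated sampler and the base classifier into a single
        # pipeline, so every bagging member resamples its bootstrap subset
        # before the classifier is fitted.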
self.base_estimator_ = Pipeline(
[
("sampler", self.base_sampler_),
("classifier", base_estimator),
]
)
def _more_tags(self):
tags = super()._more_tags()
tags_key = "_xfail_checks"
failing_test = "check_estimators_nan_inf"
reason = "Fails because the sampler removed infinity and NaN values"
if tags_key in tags:
tags[tags_key][failing_test] = reason
else:
tags[tags_key] = {failing_test: reason}
return tags
@_deprecate_positional_args
@FuncSubstitution(
eval_datasets=_get_parameter_docstring('eval_datasets'),
eval_metrics=_get_parameter_docstring('eval_metrics'),
train_verbose=_get_parameter_docstring('train_verbose', **_properties),
)
def _fit(self, X, y,
*,
sample_weight=None,
sampler_kwargs:dict={},
max_samples=None,
eval_datasets:dict=None,
eval_metrics:dict=None,
train_verbose:bool or int or dict,
):
"""Build a Bagging ensemble of estimators from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
y : array-like of shape (n_samples,)
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if the base estimator supports
sample weighting.
sampler_kwargs : dict, default={}
The kwargs to use as additional parameters when instantiating a
new sampler. If none are given, default parameters are used.
max_samples : int or float, default=None
Argument to use instead of self.max_samples.
%(eval_datasets)s
%(eval_metrics)s
%(train_verbose)s
Returns
-------
self : object
"""
# Check data, sampler_kwargs and random_state
check_target_type(y)
self.sampler_kwargs_ = check_type(
sampler_kwargs, 'sampler_kwargs', dict)
random_state = check_random_state(self.random_state)
# Convert data (X is required to be 2d and indexable)
check_x_y_args = {
'accept_sparse': ['csr', 'csc'],
'dtype': None,
'force_all_finite': False,
'multi_output': True,
}
X, y = self._validate_data(X, y, **check_x_y_args)
# Check evaluation data
self.eval_datasets_ = check_eval_datasets(eval_datasets, X, y, **check_x_y_args)
# Check evaluation metrics
self.eval_metrics_ = check_eval_metrics(eval_metrics)
# Check verbose
self.train_verbose_ = check_train_verbose(
train_verbose, self.n_estimators, **self._properties)
self._init_training_log_format()
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=None)
# Remap output
n_samples, self.n_features_in_ = X.shape
self._n_samples = n_samples
y = self._validate_y(y)
# Check parameters
self._validate_estimator()
# Validate max_samples
if max_samples is None:
max_samples = self.max_samples
if not isinstance(max_samples, numbers.Integral):
max_samples = int(max_samples * X.shape[0])
if not (0 < max_samples <= X.shape[0]):
raise ValueError("max_samples must be in (0, n_samples]")
# Store validated integer row sampling value
self._max_samples = max_samples
# Validate max_features
if isinstance(self.max_features, numbers.Integral):
max_features = self.max_features
elif isinstance(self.max_features, float):
max_features = self.max_features * self.n_features_in_
else:
raise ValueError("max_features must be int or float")
if not (0 < max_features <= self.n_features_in_):
raise ValueError("max_features must be in (0, n_features]")
max_features = max(1, int(max_features))
# Store validated integer feature sampling value
self._max_features = max_features
# Other checks
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
if self.warm_start and self.oob_score:
raise ValueError("Out of bag estimate only available"
" if warm_start=False")
if hasattr(self, "oob_score_") and self.warm_start:
del self.oob_score_
if not self.warm_start or not hasattr(self, 'estimators_'):
# Free allocated memory, if any
self.estimators_ = []
self.estimators_features_ = []
self.estimators_n_training_samples_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
return self
# Parallel loop
n_jobs, n_estimators, starts = _partition_estimators(n_more_estimators,
self.n_jobs)
total_n_estimators = sum(n_estimators)
# Advance random state to state after training
# the first n_estimators
if self.warm_start and len(self.estimators_) > 0:
random_state.randint(MAX_INT, size=len(self.estimators_))
seeds = random_state.randint(MAX_INT, size=n_more_estimators)
self._seeds = seeds
all_results = Parallel(n_jobs=n_jobs, verbose=self.verbose,
**self._parallel_args())(
delayed(_parallel_build_estimators)(
n_estimators[i],
self,
X,
y,
sample_weight,
seeds[starts[i]:starts[i + 1]],
total_n_estimators,
verbose=self.verbose)
for i in range(n_jobs))
# Reduce
self.estimators_ += list(itertools.chain.from_iterable(
t[0] for t in all_results))
self.estimators_features_ += list(itertools.chain.from_iterable(
t[1] for t in all_results))
self.estimators_n_training_samples_ += list(itertools.chain.from_iterable(
t[2] for t in all_results))
if self.oob_score:
self._set_oob_score(X, y)
        # Print training information to console.
self._training_log_to_console()
return self
@abstractmethod
def fit(self, X, y, sample_weight, **kwargs):
"""Needs to be implemented in the derived class"""
pass
@FuncGlossarySubstitution(_super.predict_log_proba, 'classes_')
def predict_log_proba(self, X):
return super().predict_log_proba(X)
@FuncGlossarySubstitution(_super.predict_proba, 'classes_')
def predict_proba(self, X):
return super().predict_proba(X)
@FuncGlossarySubstitution(_super.predict_proba, 'classes_')
def set_params(self, **params):
return super().set_params(**params)
| 36.040733 | 88 | 0.609912 |
794683430f090cd0dc05173cfdff1f5443606f7b | 8,800 | py | Python | libcloud/common/aliyun.py | zimventures/libcloud | be0765df384f1baccde24539156119856cb96816 | [
"Apache-2.0"
] | 1,435 | 2015-01-07T05:32:51.000Z | 2022-03-25T19:39:34.000Z | libcloud/common/aliyun.py | zimventures/libcloud | be0765df384f1baccde24539156119856cb96816 | [
"Apache-2.0"
] | 1,158 | 2015-01-04T18:08:42.000Z | 2022-03-24T14:34:57.000Z | libcloud/common/aliyun.py | zimventures/libcloud | be0765df384f1baccde24539156119856cb96816 | [
"Apache-2.0"
] | 832 | 2015-01-05T09:20:21.000Z | 2022-03-24T19:22:19.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import hmac
import sys
import time
import uuid
from libcloud.utils.py3 import ET
from libcloud.common.base import ConnectionUserAndKey, XmlResponse
from libcloud.common.types import MalformedResponseError
from libcloud.utils.py3 import b, u, urlquote, PY3
from libcloud.utils.xml import findtext
__all__ = [
'AliyunXmlResponse',
'AliyunRequestSigner',
'AliyunRequestSignerAlgorithmV1_0',
'SignedAliyunConnection',
'AliyunConnection',
'SIGNATURE_VERSION_1_0',
'DEFAULT_SIGNATURE_VERSION'
]
SIGNATURE_VERSION_1_0 = '1.0'
DEFAULT_SIGNATURE_VERSION = SIGNATURE_VERSION_1_0
class AliyunXmlResponse(XmlResponse):
namespace = None
def success(self):
return 200 <= self.status < 300
def parse_body(self):
"""
Each response from Aliyun contains a request id and a host id.
The response body is in utf-8 encoding.
"""
if len(self.body) == 0 and not self.parse_zero_length_body:
return self.body
try:
if PY3:
parser = ET.XMLParser(encoding='utf-8')
body = ET.XML(self.body.encode('utf-8'), parser=parser)
else:
try:
body = ET.XML(self.body)
except ValueError:
body = ET.XML(self.body.encode('utf-8'))
except Exception:
raise MalformedResponseError('Failed to parse XML',
body=self.body,
driver=self.connection.driver)
self.request_id = findtext(element=body, xpath='RequestId',
namespace=self.namespace)
self.host_id = findtext(element=body, xpath='HostId',
namespace=self.namespace)
return body
def parse_error(self):
"""
Parse error responses from Aliyun.
"""
body = super(AliyunXmlResponse, self).parse_error()
code, message = self._parse_error_details(element=body)
request_id = findtext(element=body, xpath='RequestId',
namespace=self.namespace)
host_id = findtext(element=body, xpath='HostId',
namespace=self.namespace)
error = {'code': code,
'message': message,
'request_id': request_id,
'host_id': host_id}
return u(error)
def _parse_error_details(self, element):
"""
Parse error code and message from the provided error element.
:return: ``tuple`` with two elements: (code, message)
:rtype: ``tuple``
"""
code = findtext(element=element, xpath='Code',
namespace=self.namespace)
message = findtext(element=element, xpath='Message',
namespace=self.namespace)
return (code, message)
class AliyunRequestSigner(object):
"""
Class handles signing the outgoing Aliyun requests.
"""
def __init__(self, access_key, access_secret, version):
"""
:param access_key: Access key.
:type access_key: ``str``
:param access_secret: Access secret.
:type access_secret: ``str``
:param version: API version.
:type version: ``str``
"""
self.access_key = access_key
self.access_secret = access_secret
self.version = version
def get_request_params(self, params, method='GET', path='/'):
return params
def get_request_headers(self, params, headers, method='GET', path='/'):
return params, headers
class AliyunRequestSignerAlgorithmV1_0(AliyunRequestSigner):
"""Aliyun request signer using signature version 1.0."""
def get_request_params(self, params, method='GET', path='/'):
params['Format'] = 'XML'
params['Version'] = self.version
params['AccessKeyId'] = self.access_key
params['SignatureMethod'] = 'HMAC-SHA1'
params['SignatureVersion'] = SIGNATURE_VERSION_1_0
params['SignatureNonce'] = _get_signature_nonce()
# TODO: Support 'ResourceOwnerAccount'
params['Timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%SZ',
time.gmtime())
params['Signature'] = self._sign_request(params, method, path)
return params
def _sign_request(self, params, method, path):
"""
        Sign the Aliyun request parameters and return the signature.
StringToSign = HTTPMethod + '&' +
percentEncode('/') + '&' +
percentEncode(CanonicalizedQueryString)
"""
keys = list(params.keys())
keys.sort()
pairs = []
for key in keys:
pairs.append('%s=%s' % (_percent_encode(key),
_percent_encode(params[key])))
qs = urlquote('&'.join(pairs), safe='-_.~')
string_to_sign = '&'.join((method, urlquote(path, safe=''), qs))
b64_hmac = base64.b64encode(
hmac.new(b(self._get_access_secret()), b(string_to_sign),
digestmod=hashlib.sha1).digest()
)
return b64_hmac.decode('utf8')
def _get_access_secret(self):
return '%s&' % self.access_secret
class AliyunConnection(ConnectionUserAndKey):
pass
class SignedAliyunConnection(AliyunConnection):
api_version = None
def __init__(self, user_id, key, secure=True, host=None,
port=None, url=None, timeout=None, proxy_url=None,
retry_delay=None, backoff=None, api_version=None,
signature_version=DEFAULT_SIGNATURE_VERSION):
super(SignedAliyunConnection, self).__init__(user_id=user_id, key=key,
secure=secure,
host=host, port=port,
url=url, timeout=timeout,
proxy_url=proxy_url,
retry_delay=retry_delay,
backoff=backoff)
self.signature_version = str(signature_version)
if self.signature_version == '1.0':
signer_cls = AliyunRequestSignerAlgorithmV1_0
else:
raise ValueError('Unsupported signature_version: %s' %
signature_version)
if api_version is not None:
self.api_version = str(api_version)
else:
if self.api_version is None:
raise ValueError('Unsupported null api_version')
self.signer = signer_cls(access_key=self.user_id,
access_secret=self.key,
version=self.api_version)
def add_default_params(self, params):
params = self.signer.get_request_params(params=params,
method=self.method,
path=self.action)
return params
def _percent_encode(encode_str):
"""
    Encode the string as UTF-8, URL-quote it, then replace '+' with %20 and
    '*' with %2A while leaving '~' unconverted.
    :param encode_str: ``str`` in the same encoding as sys.stdin,
                       defaulting to cp936.
:return: ``str`` represents the encoded result
:rtype: ``str``
"""
encoding = sys.stdin.encoding or 'cp936'
decoded = str(encode_str)
if PY3:
if isinstance(encode_str, bytes):
decoded = encode_str.decode(encoding)
else:
decoded = str(encode_str).decode(encoding)
res = urlquote(
decoded.encode('utf8'), '')
res = res.replace('+', '%20')
res = res.replace('*', '%2A')
res = res.replace('%7E', '~')
return res
def _get_signature_nonce():
return str(uuid.uuid4())
| 35.2 | 78 | 0.5825 |
794686dc4cab8b4ee324ef4fa53731df65b2ff56 | 152 | py | Python | setup.py | sheaffej/python-vscode-docker | 3ea980d77f7ec8424d59fb72e25b34f7c077a125 | [
"Apache-2.0"
] | null | null | null | setup.py | sheaffej/python-vscode-docker | 3ea980d77f7ec8424d59fb72e25b34f7c077a125 | [
"Apache-2.0"
] | null | null | null | setup.py | sheaffej/python-vscode-docker | 3ea980d77f7ec8424d59fb72e25b34f7c077a125 | [
"Apache-2.0"
] | null | null | null | # https://setuptools.readthedocs.io/en/latest/references/keywords.html
from setuptools import setup
setup(
name='My App',
packages=['myapp']
)
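# A minimal sketch of how a package defined this way is typically installed
# for local development (assuming a standard setuptools workflow):
#   pip install -e .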
| 19 | 70 | 0.723684 |
794686e52b0b5ffa955fa6961b0be3021265baf9 | 842 | py | Python | src/preprocess.py | tapilab/is-jzheng | 600fd4de4478e8cf2c19fc196b5898ebe9f42cac | [
"MIT"
] | null | null | null | src/preprocess.py | tapilab/is-jzheng | 600fd4de4478e8cf2c19fc196b5898ebe9f42cac | [
"MIT"
] | null | null | null | src/preprocess.py | tapilab/is-jzheng | 600fd4de4478e8cf2c19fc196b5898ebe9f42cac | [
"MIT"
] | null | null | null | """
Load multiple lines of JSON from a single file.
Return the "text" field, line by line, from each JSON object.
"""
import json
import glob
import codecs
import re
import string
def load_file(path):
with codecs.open(path,encoding="utf-8") as file:
lines = file.readlines()
res = []
for line in lines:
line = json.loads(line)['text'].lower()
line = re.sub(r'\n|\t', ' ', line)
line = line.encode('utf-8')
line = line.strip().translate(None, string.punctuation)
res.append(line)
return res
def write_file(file, path):
    f = codecs.open(path, "w")
i = 0
for l in file:
f.write(l)
f.write("\n")
f.close()
if __name__ == "__main__":
path = "../data/review.txt"
path_write = "../data/processed_comments.txt"
comments = load_file(path)
write_file(comments, path_write)
| 15.035714 | 60 | 0.623515 |
79468756edf34818274e17f9e559b07de3802c48 | 473 | py | Python | 28. Codechef Problems/CodeChef/LTIME95C/EQUINOX.py | prachi996/HacktoberFest2021-5 | d386e40952fadc7545b8a9744454fa9936c2d133 | [
"MIT"
] | 1 | 2021-10-06T10:29:51.000Z | 2021-10-06T10:29:51.000Z | 28. Codechef Problems/CodeChef/LTIME95C/EQUINOX.py | prachi996/HacktoberFest2021-5 | d386e40952fadc7545b8a9744454fa9936c2d133 | [
"MIT"
] | null | null | null | 28. Codechef Problems/CodeChef/LTIME95C/EQUINOX.py | prachi996/HacktoberFest2021-5 | d386e40952fadc7545b8a9744454fa9936c2d133 | [
"MIT"
] | null | null | null | """
Problem Name: Equinox
CodeChef Link: https://www.codechef.com/LTIME95C/problems/EQUINOX/
Problem Code: EQUINOX
"""
for i in range(int(input())):
N, A, B = map(int, input().split())
anu = 0
sar = 0
for i in range(N):
s = input()
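        # A word starting with one of the letters in "EQUINOX" scores A points
        # for Sarthak; any other word scores B points for Anuradha.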
if s[0] in "EQUINOX":
sar += A
else:
anu += B
if sar == anu:
print("DRAW")
elif sar > anu:
print("SARTHAK")
else:
print("ANURADHA")
| 18.192308 | 66 | 0.496829 |
79468802e85b1f0c69fe15c04b204fde86b55c55 | 725 | py | Python | test/conectardb.py | eadev/usqele | 4ec69f24cae60ddd35f8c1268c0de9c9141434a5 | [
"MIT"
] | 1 | 2022-03-12T01:37:07.000Z | 2022-03-12T01:37:07.000Z | test/conectardb.py | eadev/usqele | 4ec69f24cae60ddd35f8c1268c0de9c9141434a5 | [
"MIT"
] | null | null | null | test/conectardb.py | eadev/usqele | 4ec69f24cae60ddd35f8c1268c0de9c9141434a5 | [
"MIT"
] | 1 | 2021-12-17T01:08:39.000Z | 2021-12-17T01:08:39.000Z | import pymysql.cursors
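# NOTE: HOST, USER, PASSWORD and BD are not defined anywhere in this script;
# the values below are illustrative placeholders so it can run stand-alone.
HOST = "localhost"      # placeholder host
USER = "root"           # placeholder user
PASSWORD = ""           # placeholder password
BD = "usqele"           # placeholder database name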
def start():
    # CONNECT TO THE DATABASE
connection = pymysql.connect(host=HOST,
user=USER,
password=PASSWORD,
database=BD,
cursorclass=pymysql.cursors.DictCursor)
    with connection: # IF THE CONNECTION SUCCEEDED, RUN THE FOLLOWING CODE
        with connection.cursor() as cursor: # IF THE CURSOR CAN BE CREATED, RUN THE FOLLOWING
# Create a new record
sql = "SHOW TABLES"
cursor.execute(sql)
tablas = cursor.fetchall()
for tabla in tablas:
print(f"{tabla['Tables_in_usqele']}")
if __name__ == '__main__':
start()
| 32.954545 | 91 | 0.553103 |
79468812a22d34158516042d6557a2978d0c36c0 | 1,095 | py | Python | kdesupport/grantlee/grantlee.py | gitdevmod/craft-blueprints-kde | 81a866d2d606dabd57347fbac7cdab42979332dd | [
"BSD-2-Clause"
] | null | null | null | kdesupport/grantlee/grantlee.py | gitdevmod/craft-blueprints-kde | 81a866d2d606dabd57347fbac7cdab42979332dd | [
"BSD-2-Clause"
] | null | null | null | kdesupport/grantlee/grantlee.py | gitdevmod/craft-blueprints-kde | 81a866d2d606dabd57347fbac7cdab42979332dd | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import info
class subinfo(info.infoclass):
def setTargets(self):
self.svnTargets['master'] = "https://github.com/steveire/grantlee.git"
self.targets["5.2.0"] = "https://github.com/steveire/grantlee/archive/v5.2.0.tar.gz"
self.targetDigests["5.2.0"] = (['139acee5746b957bdf1327ec0d97c604d4c0b9be42aec5d584297cb5ed6a990a'], CraftHash.HashAlgorithm.SHA256)
self.targetInstSrc["5.2.0"] = "grantlee-5.2.0"
self.defaultTarget = "5.2.0"
if CraftCore.compiler.isMacOS:
self.patchToApply['5.2.0'] = [("0001-Don-t-use-dot-in-folder-name-to-prevent-macOS-issues.patch", 1)]
self.patchLevel["5.2.0"] = 1
def setDependencies(self):
self.runtimeDependencies["virtual/base"] = None
self.runtimeDependencies["libs/qt5/qtbase"] = None
self.runtimeDependencies["libs/qt5/qtscript"] = None # optional dep, but we probably want to have it enabled
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__(self):
CMakePackageBase.__init__(self)
| 37.758621 | 140 | 0.677626 |
79468ad547b2835e84c8fc61d525a99c92c5c57c | 171 | py | Python | app.py | rturatti/labdevops-experience | ead33374b10df19fdda385e487542c29b813f6df | [
"MIT"
] | null | null | null | app.py | rturatti/labdevops-experience | ead33374b10df19fdda385e487542c29b813f6df | [
"MIT"
] | 1 | 2021-11-25T16:41:27.000Z | 2021-11-25T16:41:27.000Z | app.py | rturatti/labdevops-experience | ead33374b10df19fdda385e487542c29b813f6df | [
"MIT"
] | null | null | null | from flask import Flask
app = Flask(__name__)
@app.route("/")
def pagina_inicial():
return "Hello World - Rodrigo Turatti"
if __name__ == '__main__':
app.run()
| 15.545455 | 42 | 0.672515 |
79468aec3a5d1251bc398d7e5dcc8efae2cfbccf | 1,774 | py | Python | examples/ad_manager/v202005/order_service/get_all_orders.py | jasperan/googleads-python-lib | 6add9a7cc6148e98ada8097586a8eb1b47b2a8fd | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v202005/order_service/get_all_orders.py | jasperan/googleads-python-lib | 6add9a7cc6148e98ada8097586a8eb1b47b2a8fd | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v202005/order_service/get_all_orders.py | jasperan/googleads-python-lib | 6add9a7cc6148e98ada8097586a8eb1b47b2a8fd | [
"Apache-2.0"
] | 1 | 2021-06-23T09:15:34.000Z | 2021-06-23T09:15:34.000Z | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all orders.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
order_service = client.GetService('OrderService', version='v202005')
# Create a statement to select orders.
statement = ad_manager.StatementBuilder(version='v202005')
# Retrieve a small amount of orders at a time, paging
# through until all orders have been retrieved.
while True:
response = order_service.getOrdersByStatement(statement.ToStatement())
if 'results' in response and len(response['results']):
for order in response['results']:
# Print out some information for each order.
print('Order with ID "%d" and name "%s" was found.\n' % (order['id'],
order['name']))
statement.offset += statement.limit
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| 35.48 | 80 | 0.704622 |
79468b9680371d13bcbecf2959a3704a7454eaef | 878 | py | Python | candlepb/Combo/problem_exp6.py | bigwater/candlepb | c513f6c85492c936eebff07ba40f1f4f062919c7 | [
"BSD-2-Clause"
] | 1 | 2021-01-21T04:25:50.000Z | 2021-01-21T04:25:50.000Z | candlepb/Combo/problem_exp6.py | bigwater/candlepb | c513f6c85492c936eebff07ba40f1f4f062919c7 | [
"BSD-2-Clause"
] | null | null | null | candlepb/Combo/problem_exp6.py | bigwater/candlepb | c513f6c85492c936eebff07ba40f1f4f062919c7 | [
"BSD-2-Clause"
] | 1 | 2021-02-01T01:37:30.000Z | 2021-02-01T01:37:30.000Z | from deephyper.benchmark import Problem
from candlepb.Combo.models.candle_mlp_5 import create_structure
# We create our Problem object with the Problem class. You don't have to name your Problem object 'Problem'; it can be any name you want. You can also define different problems in the same module.
Problem = Problem()
# You define the create structure function. This function will return an object following the Structure interface. You can also have kwargs arguments such as 'num_cells' for this function.
Problem.add_dim('create_structure', {
'func': create_structure
})
# You define the hyperparameters used to train your generated models during the search.
Problem.add_dim('hyperparameters', {
'num_epochs': 1,
})
# Just to print your problem, to test its definition and imports in the current python environment.
if __name__ == '__main__':
print(Problem)
| 43.9 | 195 | 0.780182 |
79468bbd2769af37355edc49a1dd99e9f56c538e | 283 | py | Python | djconnectwise/migrations/0086_merge_20190507_1402.py | kti-sam/django-connectwise | 28484faad9435892a46b8ce4a3c957f64c299971 | [
"MIT"
] | 10 | 2017-04-27T19:51:38.000Z | 2020-10-09T17:21:23.000Z | djconnectwise/migrations/0086_merge_20190507_1402.py | kti-sam/django-connectwise | 28484faad9435892a46b8ce4a3c957f64c299971 | [
"MIT"
] | 45 | 2017-02-07T22:52:07.000Z | 2021-11-25T21:45:44.000Z | djconnectwise/migrations/0086_merge_20190507_1402.py | kti-sam/django-connectwise | 28484faad9435892a46b8ce4a3c957f64c299971 | [
"MIT"
] | 9 | 2017-01-27T00:07:33.000Z | 2021-07-12T19:48:27.000Z | # Generated by Django 2.1 on 2019-05-07 14:02
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0085_auto_20190506_1028'),
('djconnectwise', '0085_auto_20190430_1004'),
]
operations = [
]
| 18.866667 | 53 | 0.664311 |
79468bf27b32c88c6c3a5cdc5f365ffcf124ce34 | 728 | py | Python | png.py | kaleksandrov/advanced-python-training | 0d1927cbb7bf9c72668341264c56f314d21ef2c4 | [
"MIT"
] | null | null | null | png.py | kaleksandrov/advanced-python-training | 0d1927cbb7bf9c72668341264c56f314d21ef2c4 | [
"MIT"
] | null | null | null | png.py | kaleksandrov/advanced-python-training | 0d1927cbb7bf9c72668341264c56f314d21ef2c4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import struct
PNG_HEADER=b"\x89PNG\r\n\x1a\n"
CHUNK_SIZE=4096
def print_binary_file(filename):
    # Read the whole file in CHUNK_SIZE blocks and print each byte value.
    with open(filename, 'rb') as f:
        chunk = f.read(CHUNK_SIZE)
        while chunk:
            for byte in chunk:
                print(byte, end=' ')
            chunk = f.read(CHUNK_SIZE)
def check_if_png(filename):
with open(filename, 'rb') as f:
header = f.read(8)
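        # A PNG file begins with an 8-byte signature followed by the IHDR chunk;
        # its 4-byte big-endian width and height fields start at byte offset 16.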
f.seek(16)
dimensions = f.read(8)
if header == PNG_HEADER:
print('This is a valid PNG file!!')
width,height=struct.unpack(">2L", dimensions)
print("Dimensions: {} x {}".format(width, height))
else:
raise Exception('Not a PNG file!!')
if __name__=='__main__':
print_binary_file('img.png')
check_if_png('img.png')
| 23.483871 | 62 | 0.589286 |
79468c1c0200960b3cf52c0553f181646445fcf2 | 37,682 | py | Python | TSIClient/TSIClient.py | jbrown15/RaaLabs-TSIClient | 5d0c4bdd130647e01552e878f2594e1978546054 | [
"MIT"
] | null | null | null | TSIClient/TSIClient.py | jbrown15/RaaLabs-TSIClient | 5d0c4bdd130647e01552e878f2594e1978546054 | [
"MIT"
] | 1 | 2021-06-02T02:19:55.000Z | 2021-06-02T02:19:55.000Z | TSIClient/TSIClient.py | jbrown15/RaaLabs-TSIClient | 5d0c4bdd130647e01552e878f2594e1978546054 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import json
import pandas as pd
import requests
import logging
from TSIClient.exceptions import TSIEnvironmentError
from TSIClient.exceptions import TSIStoreError
from TSIClient.exceptions import TSIQueryError
class TSIClient():
"""TSIClient. Holds methods to interact with an Azure TSI environment.
This class can be used to retrieve time series data from Azure TSI. Data
    is retrieved in the form of a pandas dataframe, which allows subsequent analysis
by data analysts, data scientists and developers.
It can be instantiated either by arguments or by environment variables (if arguments
are specified, they take precedence even when environment variables are set).
Args:
enviroment (str): The name of the Azure TSI environment.
client_id (str): The client id of the service principal used to authenticate with Azure TSI.
client_secret (str): The client secret of the service principal used to authenticate with Azure TSI.
tenant_id (str): The tenant id of the service principal used to authenticate with Azure TSI.
        applicationName (str): The name can be an arbitrary string. For informational purposes.
Examples:
The TSIClient is the entry point to the SDK. You can instantiate it like this:
>>> from TSIClient import TSIClient as tsi
>>> client = tsi.TSIClient(
... enviroment="<your-tsi-env-name>",
... client_id="<your-client-id>",
... client_secret="<your-client-secret>",
... tenant_id="<your-tenant-id>",
        ...     applicationName="<your-app-name>"
... )
You might find it useful to specify environment variables to instantiate the TSIClient.
To do so, you need to set the following environment variables:
* ``TSICLIENT_APPLICATION_NAME``
* ``TSICLIENT_ENVIRONMENT_NAME``
* ``TSICLIENT_CLIENT_ID``
* ``TSICLIENT_CLIENT_SECRET``
* ``TSICLIENT_TENANT_ID``
Now you can instantiate the TSIClient without passing any arguments:
>>> from TSIClient import TSIClient as tsi
>>> client = tsi.TSIClient()
"""
def __init__(
self,
enviroment=None,
client_id=None,
client_secret=None,
applicationName=None,
tenant_id=None
):
self._apiVersion = "2018-11-01-preview"
self._applicationName = applicationName if applicationName is not None else os.environ["TSICLIENT_APPLICATION_NAME"]
self._enviromentName = enviroment if enviroment is not None else os.environ["TSICLIENT_ENVIRONMENT_NAME"]
self._client_id = client_id if client_id is not None else os.environ["TSICLIENT_CLIENT_ID"]
self._client_secret = client_secret if client_secret is not None else os.environ["TSICLIENT_CLIENT_SECRET"]
self._tenant_id = tenant_id if tenant_id is not None else os.environ["TSICLIENT_TENANT_ID"]
def _getToken(self):
"""Gets an authorization token from the Azure TSI api which is used to authenticate api calls.
Returns:
str: The authorization token.
"""
url = "https://login.microsoftonline.com/{0!s}/oauth2/token".format(self._tenant_id)
payload = {
"grant_type":"client_credentials",
"client_id":self._client_id,
"client_secret": self._client_secret,
"resource":"https%3A%2F%2Fapi.timeseries.azure.com%2F&undefined="
}
payload = "grant_type={grant_type}&client_id={client_id}&client_secret={client_secret}&resource={resource}".format(**payload)
headers = {
'Content-Type': "application/x-www-form-urlencoded",
'cache-control': "no-cache"
}
try:
response = requests.request("POST", url, data=payload, headers=headers, timeout=10)
response.raise_for_status()
except requests.exceptions.ConnectTimeout:
logging.error("TSIClient: The request to the TSI api timed out.")
raise
except requests.exceptions.HTTPError as e:
status_code = e.response.status_code
if status_code == 401:
logging.error("TSIClient: Authentication with the TSI api was unsuccessful. Check your client secret.")
else:
logging.error("TSIClient: The request to the TSI api returned an unsuccessfull status code. Check the stack trace")
raise
jsonResp = json.loads(response.text)
tokenType = jsonResp['token_type']
authorizationToken = tokenType +" " + jsonResp['access_token']
return authorizationToken
def _getQueryString(self, useWarmStore=None):
"""Creates the querystring for an api request.
        Can be used in all api requests in TSIClient.
Args:
useWarmStore (bool): A boolean to indicate the storeType. Defaults to None,
in which case no storeType param is included in the querystring.
Returns:
dict: The querystring with the api-version and optionally the storeType.
"""
if useWarmStore == None:
return {"api-version": self._apiVersion}
else:
return {
"api-version": self._apiVersion,
"storeType": "WarmStore" if useWarmStore == True else "ColdStore"
}
def _getVariableAggregate(self, aggregate=None):
"""Creates the variable aggregation type and the request type based thereon.
The request type is either "aggregateSeries" (if an aggregation is provided),
or "getSeries" if the aggregate is None.
Args:
aggregate (str): The aggregation method ("avg", "min", "max").
Returns:
tuple: A tuple with the aggregate (dict) and the requestType (str).
"""
if aggregate not in ["avg", "min", "max", None]:
raise TSIQueryError(
"TSIClient: Aggregation method not supported, must be \"avg\", \"min\" or \"max\"."
)
if aggregate != None:
aggregate = {"tsx": "{0!s}($value)".format(aggregate)}
requestType = "aggregateSeries"
else:
requestType = "getSeries"
return (aggregate, requestType)
def getEnviroment(self):
"""Gets the id of the environment specified in the TSIClient class constructor.
Returns:
str: The environment id.
Raises:
TSIEnvironmentError: Raised if the TSI environment does not exist.
Example:
>>> from TSIClient import TSIClient as tsi
>>> client = tsi.TSIClient()
>>> env = client.getEnviroment()
"""
authorizationToken = self._getToken()
url = "https://api.timeseries.azure.com/environments"
querystring = self._getQueryString()
payload = ""
headers = {
'x-ms-client-application-name': self._applicationName,
'Authorization': authorizationToken,
'Content-Type': "application/json",
'cache-control': "no-cache"
}
try:
response = requests.request("GET", url, data=payload, headers=headers, params=querystring, timeout=10)
response.raise_for_status()
except requests.exceptions.ConnectTimeout:
logging.error("TSIClient: The request to the TSI api timed out.")
raise
except requests.exceptions.HTTPError:
logging.error("TSIClient: The request to the TSI api returned an unsuccessfull status code.")
raise
environments = json.loads(response.text)['environments']
environmentId = None
for enviroment in environments:
if enviroment['displayName'] == self._enviromentName:
environmentId = enviroment['environmentId']
break
if environmentId == None:
raise TSIEnvironmentError("TSIClient: TSI environment not found. Check the spelling or create an environment in Azure TSI.")
return environmentId
def getEnvironmentAvailability(self):
"""Returns the time range and distribution of event count over the event timestamp.
Can be used to provide landing experience of navigating to the environment.
Returns:
dict: The environment availability. Contains interval size, distribution and range.
Example:
>>> from TSIClient import TSIClient as tsi
>>> client = tsi.TSIClient()
>>> env_availability = client.getEnvironmentAvailability()
"""
environmentId = self.getEnviroment()
authorizationToken = self._getToken()
url = "https://{environmentId}.env.timeseries.azure.com/availability".format(
environmentId=environmentId,
)
querystring = self._getQueryString()
payload = ""
headers = {
'x-ms-client-application-name': self._applicationName,
'Authorization': authorizationToken,
'Content-Type': "application/json",
'cache-control': "no-cache"
}
try:
response = requests.request(
"GET",
url,
data=payload,
headers=headers,
params=querystring,
timeout=10
)
response.raise_for_status()
except requests.exceptions.ConnectTimeout:
logging.error("TSIClient: The request to the TSI api timed out.")
raise
except requests.exceptions.HTTPError:
logging.error("TSIClient: The request to the TSI api returned an unsuccessfull status code.")
raise
return json.loads(response.text)
def getInstances(self):
"""Gets all instances (timeseries) from the specified TSI environment.
Returns:
dict: The instances in form of the response from the TSI api call.
Contains typeId, timeSeriesId, name, description, hierarchyIds and instanceFields per instance.
Example:
>>> from TSIClient import TSIClient as tsi
>>> client = tsi.TSIClient()
>>> instances = client.getInstances()
"""
environmentId = self.getEnviroment()
authorizationToken = self._getToken()
url = "https://" + environmentId + ".env.timeseries.azure.com/timeseries/instances/"
querystring = self._getQueryString()
payload = ""
headers = {
'x-ms-client-application-name': self._applicationName,
'Authorization': authorizationToken,
'Content-Type': "application/json",
'cache-control': "no-cache"
}
response = requests.request("GET", url, data=payload, headers=headers, params=querystring)
if response.text:
jsonResponse = json.loads(response.text)
result = jsonResponse
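            # The TSI instances API pages its results (at most 1000 per call);
            # follow the continuation token until every page has been collected.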
while len(jsonResponse['instances'])>999 and 'continuationToken' in list(jsonResponse.keys()):
headers = {
'x-ms-client-application-name': self._applicationName,
'Authorization': authorizationToken,
'x-ms-continuation' : jsonResponse['continuationToken'],
'Content-Type': "application/json",
'cache-control': "no-cache"
}
response = requests.request("GET", url, data=payload, headers=headers, params=querystring)
if response.text:
jsonResponse = json.loads(response.text)
result['instances'].extend(jsonResponse['instances'])
return result
def getHierarchies(self):
"""Gets all hierarchies from the specified TSI environment.
Returns:
dict: The hierarchies in form of the response from the TSI api call.
Contains hierarchy id, names and source fields per hierarchy.
Example:
>>> from TSIClient import TSIClient as tsi
>>> client = tsi.TSIClient()
>>> hierarchies = client.getHierarchies()
"""
environmentId = self.getEnviroment()
authorizationToken = self._getToken()
url = "https://" + environmentId + ".env.timeseries.azure.com/timeseries/hierarchies"
querystring = self._getQueryString()
payload = ""
headers = {
'x-ms-client-application-name': self._applicationName,
'Authorization': authorizationToken,
'Content-Type': "application/json",
'cache-control': "no-cache"
}
try:
response = requests.request(
"GET",
url,
data=payload,
headers=headers,
params=querystring,
timeout=10
)
response.raise_for_status()
except requests.exceptions.ConnectTimeout:
logging.error("TSIClient: The request to the TSI api timed out.")
raise
except requests.exceptions.HTTPError:
logging.error("TSIClient: The request to the TSI api returned an unsuccessfull status code.")
raise
return json.loads(response.text)
def getTypes(self):
"""Gets all types from the specified TSI environment.
Returns:
dict: The types in form of the response from the TSI api call.
Contains id, name, description and variables per type.
Example:
>>> from TSIClient import TSIClient as tsi
>>> client = tsi.TSIClient()
>>> types = client.getTypes()
"""
environmentId = self.getEnviroment()
authorizationToken = self._getToken()
url = "https://" + environmentId + ".env.timeseries.azure.com/timeseries/types"
querystring = self._getQueryString()
payload = ""
headers = {
'x-ms-client-application-name': self._applicationName,
'Authorization': authorizationToken,
'Content-Type': "application/json",
'cache-control': "no-cache"
}
try:
response = requests.request(
"GET",
url,
data=payload,
headers=headers,
params=querystring,
timeout=10
)
response.raise_for_status()
except requests.exceptions.ConnectTimeout:
logging.error("TSIClient: The request to the TSI api timed out.")
raise
except requests.exceptions.HTTPError:
logging.error("TSIClient: The request to the TSI api returned an unsuccessfull status code.")
raise
return json.loads(response.text)
def getTypeTsx(self):
"""Extracts type id and Value (tsx) from types from the specified TSI environment.
Returns:
dict: The types collected from the response from the TSI api call.
Contains id and variable value (tsx) per type.
Only type instances with the JSON build up
type > variables > Value > value > tsx
are returned.
Example:
>>> from TSIClient import TSIClient as tsi
>>> client = tsi.TSIClient()
>>> types = client.getTypeTsx()
"""
types={}
jsonResponse = self.getTypes()
for typeElement in jsonResponse['types']:
try:
typeElement['variables']['Value']['value']['tsx']
types[typeElement['id']] = typeElement['variables']['Value']['value']['tsx']
except:
logging.error('"Value" for type id {type} cannot be extracted'.format(type = typeElement['id']))
pass
return types
def _updateTimeSeries(self, payload, timeseries):
"""Writes instances to the TSI environment.
Args:
payload (str): A json-serializable payload that is posted to the TSI environment.
The format of the payload is specified in the Azure TSI documentation.
Returns:
dict: The response of the TSI api call.
"""
environmentId = self.getEnviroment()
authorizationToken = self._getToken()
url = "https://{environmentId}.env.timeseries.azure.com/timeseries/{timeseries}/$batch".format(environmentId=environmentId,timeseries=timeseries)
querystring = self._getQueryString()
headers = {
'x-ms-client-application-name': self._applicationName,
'Authorization': authorizationToken,
'Content-Type': "application/json",
'cache-control': "no-cache"
}
response = requests.request("POST", url, data=json.dumps(payload), headers=headers, params=querystring)
if response.text:
jsonResponse = json.loads(response.text)
return jsonResponse
def writeInstance(self, payload):
jsonResponse = self._updateTimeSeries(payload, 'instances')
return jsonResponse
def writeTypes(self, payload):
jsonResponse = self._updateTimeSeries(payload, 'types')
return jsonResponse
def writeHierarchies(self, payload):
jsonResponse = self._updateTimeSeries(payload, 'hierarchies')
return jsonResponse
def deleteInstances(self, instances):
environmentId = self.getEnviroment()
authorizationToken = self._getToken()
instancesList = list()
for i in range(0,len(instances)):
instance = instances[i]
if instance == None or len(instance)<36:
continue
instancesList.append([instance])
payload = {"delete":{"timeSeriesIds":instancesList}}
url = "https://" + environmentId + ".env.timeseries.azure.com/timeseries/instances/$batch"
querystring = self._getQueryString()
headers = {
'x-ms-client-application-name': self._applicationName,
'Authorization': authorizationToken,
'Content-Type': "application/json",
'cache-control': "no-cache"
}
response = requests.request("POST", url, data=json.dumps(payload), headers=headers, params=querystring)
        # Check whether the response body contains anything.
if response.text:
jsonResponse = json.loads(response.text)
return jsonResponse
def deleteAllInstances(self):
instances = self.getInstances()['instances']
instancesList = list()
for i in range(0,len(instances)):
instance = instances[i]['timeSeriesId'][0]
            if instance is None or len(instance) < 36:
continue
instancesList.append([instance])
environmentId = self.getEnviroment()
authorizationToken = self._getToken()
payload = {"delete":{"timeSeriesIds":instancesList}}
url = "https://" + environmentId + ".env.timeseries.azure.com/timeseries/instances/$batch"
querystring = self._getQueryString()
headers = {
'x-ms-client-application-name': self._applicationName,
'Authorization': authorizationToken,
'Content-Type': "application/json",
'cache-control': "no-cache"
}
response = requests.request("POST", url, data=json.dumps(payload), headers=headers, params=querystring)
# Test if response body contains sth.
if response.text:
jsonResponse = json.loads(response.text)
return jsonResponse
def getNameById(self, ids):
"""Returns the timeseries names that correspond to the given ids.
Args:
ids (list): The ids for which to get names.
Returns:
list: The timeseries names, None if timeseries id does not exist in the TSI environment.
"""
result=self.getInstances()
timeSeriesNames=[]
idMap={}
for instance in result['instances']:
if 'timeSeriesId' in instance:
idMap[instance['timeSeriesId'][0]] = instance
for ID in ids:
if ID in idMap:
timeSeriesNames.append(idMap[ID]['name'])
else:
timeSeriesNames.append(None)
return timeSeriesNames
def getIdByAssets(self, asset):
"""Returns the timeseries ids that belong to a given asset.
Args:
asset (str): The asset name.
Returns:
list: The timeseries ids.
"""
result=self.getInstances()
timeSeriesIds=[]
nameMap={}
for instance in result['instances']:
if 'name' in instance and asset in instance['name']:
nameMap[instance['name']] = instance
timeSeriesIds.append(instance['timeSeriesId'][0])
else:
                continue
return timeSeriesIds
def getIdByName(self, names):
"""Returns the timeseries ids that correspond to the given names.
Args:
names (list(str)): The names for which to get ids.
Returns:
list: The timeseries ids, None if timeseries name does not exist in the TSI environment.
"""
result=self.getInstances()
timeSeriesIds=[]
nameMap={}
for instance in result['instances']:
if 'name' in instance:
nameMap[instance['name']] = instance
for name in names:
if name in nameMap:
timeSeriesIds.append(nameMap[name]['timeSeriesId'][0])
else:
timeSeriesIds.append(None)
return timeSeriesIds
def getIdByDescription(self, names):
"""Returns the timeseries ids that correspond to the given descriptions.
Args:
names (list): The descriptions for which to get ids.
Returns:
list: The timeseries ids, None if timeseries description does not exist in the TSI environment.
"""
result=self.getInstances()
timeSeriesIds=[]
nameMap={}
for instance in result['instances']:
if 'description' in instance:
nameMap[instance['description']] = instance
for name in names:
if name in nameMap:
timeSeriesIds.append(nameMap[name]['timeSeriesId'][0])
else:
timeSeriesIds.append(None)
return timeSeriesIds
def getTypeByDescription(self, names):
"""Returns the type ids that correspond to the given descriptions.
Args:
names (list): The descriptions for which to get type ids.
Returns:
list: The type ids, None if timeseries description does not exist in the TSI environment.
"""
result=self.getInstances()
typeIds=[]
nameMap={}
for instance in result['instances']:
if 'description' in instance:
nameMap[instance['description']] = instance
for name in names:
if name in nameMap:
typeIds.append(nameMap[name]['typeId'])
else:
typeIds.append(None)
return typeIds
def getTypeById(self, ids):
"""Returns the type ids that correspond to the given timeseries ids.
Args:
ids (list): The timeseries ids for which to get type ids.
Returns:
list: The type ids, None if timeseries ids does not exist in the TSI environment.
"""
result=self.getInstances()
typeIds=[]
idMap={}
for instance in result['instances']:
if 'timeSeriesId' in instance:
idMap[instance['timeSeriesId'][0]] = instance
for ID in ids:
if ID in idMap:
typeIds.append(idMap[ID]['typeId'])
else:
typeIds.append(None)
return typeIds
def getTypeByName(self, names):
"""Returns the type ids that correspond to the given names.
Args:
names (list(str)): The names for which to get ids.
Returns:
list: The type ids, None if timeseries name does not exist in the TSI environment.
"""
result=self.getInstances()
typeIds=[]
nameMap={}
for instance in result['instances']:
if 'name' in instance:
nameMap[instance['name']] = instance
for name in names:
if name in nameMap:
typeIds.append(nameMap[name]['typeId'])
else:
typeIds.append(None)
return typeIds
def getDataByName(self, variables, timespan, interval, aggregate=None, useWarmStore=False):
"""Returns a dataframe with timestamps and values for the time series names given in "variables".
Can be used to return data for single and multiple timeseries. Names must be exact matches.
Args:
variables (list): The variable names. Corresponds to the "name/Time Series Name" field of the time series instances.
            timespan (list): A list of two timestamps. First list element is the start time, second element is the end time.
Example: timespan=['2019-12-12T15:35:11.68Z', '2019-12-12T17:02:05.958Z']
interval (str): The time interval that is used during aggregation. Must follow the ISO-8601 duration format.
Example: interval="PT1M", for 1 minute aggregation.
aggregate (str): Supports "min", "max", "avg". Can be None, in which case the raw events are returned. Defaults to None.
useWarmStore (bool): If True, the query is executed on the warm storage (free of charge), otherwise on the cold storage. Defaults to False.
Returns:
A pandas dataframe with timeseries data. Columns are ordered the same way as the variable names.
Raises:
            TSIStoreError: Raised if the query was attempted on the warm store, but the warm store is not enabled.
TSIQueryError: Raised if there was an error in the query arguments (e.g. wrong formatting).
Example:
>>> from TSIClient import TSIClient as tsi
>>> client = tsi.TSIClient()
>>> data = client.getDataByName(
... variables=["timeseries_name_1", "timeseries_name_2"],
... timespan=["2020-01-25T10:00:11.68Z", "2020-01-26T13:45:11.68Z"],
... interval="PT5M",
... aggregate="avg",
... useWarmStore=False
... )
"""
environmentId = self.getEnviroment()
authorizationToken = self._getToken()
url = "https://" + environmentId + ".env.timeseries.azure.com/timeseries/query?"
querystring = self._getQueryString(useWarmStore=useWarmStore)
timeseries = self.getIdByName(variables)
types = self.getTypeByName(variables)
aggregate, requestType = self._getVariableAggregate(aggregate=aggregate)
return self._getData(
timeseries=timeseries,
types = types,
url=url,
querystring=querystring,
requestType=requestType,
timespan=timespan,
interval=interval,
aggregate=aggregate,
authorizationToken=authorizationToken,
otherColNamesThanTimeseriesIds=variables,
)
def getDataByDescription(self, variables, TSName, timespan, interval, aggregate=None, useWarmStore=False):
"""Returns a dataframe with timestamp and values for the time series that match the description given in "variables".
Can be used to return data for single and multiple timeseries. Descriptions must be exact matches.
Args:
variables (list): The variable descriptions. Corresponds to the "description" field of the time series instances.
            TSName (list): The column names for the returned dataframe. Must be in the same order as the variable descriptions.
These names can be arbitrary and do not need to coincide with the timeseries names in TSI.
            timespan (list): A list of two timestamps. First list element is the start time, second element is the end time.
Example: timespan=['2019-12-12T15:35:11.68Z', '2019-12-12T17:02:05.958Z']
interval (str): The time interval that is used during aggregation. Must follow the ISO-8601 duration format.
Example: interval="PT1M", for 1 minute aggregation. If "aggregate" is None, the raw events are returned.
aggregate (str): Supports "min", "max", "avg". Can be None, in which case the raw events are returned. Defaults to None.
useWarmStore (bool): If True, the query is executed on the warm storage (free of charge), otherwise on the cold storage. Defaults to False.
Returns:
A pandas dataframe with timeseries data. Columns are ordered the same way as the variable descriptions.
Raises:
            TSIStoreError: Raised if the query was attempted on the warm store, but the warm store is not enabled.
TSIQueryError: Raised if there was an error in the query arguments (e.g. wrong formatting).
Example:
>>> from TSIClient import TSIClient as tsi
>>> client = tsi.TSIClient()
>>> data = client.getDataByDescription(
... variables=["timeseries_description_1", "timeseries_description_2"],
... TSName=["my_timeseries_name_1", "my_timeseries_name_2"]
... timespan=["2020-01-25T10:00:11.68Z", "2020-01-26T13:45:11.68Z"],
... interval="PT5M",
... aggregate="avg",
... useWarmStore=False
... )
"""
environmentId = self.getEnviroment()
authorizationToken = self._getToken()
url = "https://" + environmentId + ".env.timeseries.azure.com/timeseries/query?"
querystring = self._getQueryString(useWarmStore=useWarmStore)
timeseries = self.getIdByDescription(variables)
types = self.getTypeByDescription(variables)
aggregate, requestType = self._getVariableAggregate(aggregate=aggregate)
return self._getData(
timeseries=timeseries,
types=types,
url=url,
querystring=querystring,
requestType=requestType,
timespan=timespan,
interval=interval,
aggregate=aggregate,
authorizationToken=authorizationToken,
otherColNamesThanTimeseriesIds=TSName
)
def getDataById(self, timeseries, timespan, interval, aggregate=None, useWarmStore=False):
"""Returns a dataframe with timestamp and values for the time series that match the description given in "timeseries".
        Can be used to return data for single and multiple timeseries. Timeseries ids must be exact matches.
Args:
timeseries (list): The timeseries ids. Corresponds to the "timeSeriesId" field of the time series instances.
            timespan (list): A list of two timestamps. First list element is the start time, second element is the end time.
Example: timespan=['2019-12-12T15:35:11.68Z', '2019-12-12T17:02:05.958Z']
interval (str): The time interval that is used during aggregation. Must follow the ISO-8601 duration format.
Example: interval="PT1M", for 1 minute aggregation. If "aggregate" is None, the raw events are returned.
aggregate (str): Supports "min", "max", "avg". Can be None, in which case the raw events are returned. Defaults to None.
useWarmStore (bool): If True, the query is executed on the warm storage (free of charge), otherwise on the cold storage. Defaults to False.
Returns:
A pandas dataframe with timeseries data. Columns are ordered the same way as the timeseries ids.
Raises:
            TSIStoreError: Raised if the query was attempted on the warm store, but the warm store is not enabled.
TSIQueryError: Raised if there was an error in the query arguments (e.g. wrong formatting).
Example:
>>> from TSIClient import TSIClient as tsi
>>> client = tsi.TSIClient()
>>> data = client.getDataById(
... timeseries=["timeseries_id_1", "timeseries_id_2"],
... timespan=["2020-01-25T10:00:11.68Z", "2020-01-26T13:45:11.68Z"],
... interval="PT5M",
... aggregate="avg",
... useWarmStore=False
... )
"""
environmentId = self.getEnviroment()
authorizationToken = self._getToken()
url = "https://" + environmentId + ".env.timeseries.azure.com/timeseries/query?"
querystring = self._getQueryString(useWarmStore=useWarmStore)
aggregate, requestType = self._getVariableAggregate(aggregate=aggregate)
types = self.getTypeById(timeseries)
return self._getData(
timeseries=timeseries,
types=types,
url=url,
querystring=querystring,
requestType=requestType,
timespan=timespan,
interval=interval,
aggregate=aggregate,
authorizationToken=authorizationToken,
)
def _getData(
self,
timeseries,
types,
url,
querystring,
requestType,
timespan,
interval,
aggregate,
authorizationToken,
otherColNamesThanTimeseriesIds=None,
):
df = None
typeList = self.getTypeTsx()
        if otherColNamesThanTimeseriesIds is not None:
colNames = otherColNamesThanTimeseriesIds
else:
colNames = timeseries
for i, _ in enumerate(timeseries):
            if timeseries[i] is None:
logging.error("No such tag: {tag}".format(tag=colNames[i]))
continue
logging.info(f'Timeseries {colNames[i]} has type {typeList[types[i]]}')
payload = {
requestType: {
"timeSeriesId": [timeseries[i]],
"timeSeriesName": None,
"searchSpan": {"from": timespan[0], "to": timespan[1]},
"filter": None,
"interval": interval,
"inlineVariables": {
"AverageTest": {
"kind": "numeric",
"value": {"tsx": typeList[types[i]]},
"filter": None,
"aggregation": aggregate,
},
},
"projectedVariables": ["AverageTest"],
}
}
headers = {
"x-ms-client-application-name": self._applicationName,
"Authorization": authorizationToken,
"Content-Type": "application/json",
"cache-control": "no-cache",
}
try:
response = requests.request(
"POST",
url,
data=json.dumps(payload),
headers=headers,
params=querystring,
)
response.raise_for_status()
except requests.exceptions.ConnectTimeout:
logging.error("TSIClient: The request to the TSI api timed out.")
raise
except requests.exceptions.HTTPError:
logging.error("TSIClient: The request to the TSI api returned an unsuccessfull status code.")
raise
if response.text:
response = json.loads(response.text)
if "error" in response:
if "innerError" in response["error"]:
if response["error"]["innerError"]["code"] == "TimeSeriesQueryNotSupported":
raise TSIStoreError(
"TSIClient: Warm store not enabled in TSI environment: {id}. Set useWarmStore to False."
.format(id=self._enviromentName),
)
else:
logging.error("TSIClient: The query was unsuccessful, check the format of the function arguments.")
raise TSIQueryError(response["error"])
if response["timestamps"] == []:
logging.critical("No data in search span for tag: {tag}".format(tag=colNames[i]))
continue
            if df is None:
                # First tag that returned data: create the frame with its timestamp column.
                df = pd.DataFrame(
                    {
                        "timestamp": response["timestamps"],
                        colNames[i]: response["properties"][0]["values"],
                    }
                )
            else:
                # Subsequent tags are appended as additional columns.
                df[colNames[i]] = response["properties"][0]["values"]
            logging.critical("Loaded data for tag: {tag}".format(tag=colNames[i]))
return df
| 38.76749 | 153 | 0.589167 |
79468d8ab21ea794f3cb15408968b13a7d8a79af | 5,514 | py | Python | contrib/seeds/makeseeds.py | crypto-node/MUE | 0fe297bb1bc1755d1ed0ac95f1f1b9e2e656b526 | [
"MIT"
] | 4 | 2020-06-19T11:37:16.000Z | 2021-08-02T22:56:37.000Z | contrib/seeds/makeseeds.py | crypto-node/MUE | 0fe297bb1bc1755d1ed0ac95f1f1b9e2e656b526 | [
"MIT"
] | null | null | null | contrib/seeds/makeseeds.py | crypto-node/MUE | 0fe297bb1bc1755d1ed0ac95f1f1b9e2e656b526 | [
"MIT"
] | 4 | 2020-02-27T11:26:34.000Z | 2022-01-20T13:29:19.000Z |
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
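# Typical invocation (an assumption based on the stdin/stdout interface below; the file
# names are placeholders):
#   python3 makeseeds.py < seeds_main.txt > nodes_main.txt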
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/BARECore:2.0.(0|1|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries without a valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| 32.05814 | 186 | 0.566921 |
79468dc450d255592f65b3cb23c1aa2ad2d0214f | 1,489 | py | Python | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/linked_integration_runtime_type_py3.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/linked_integration_runtime_type_py3.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/linked_integration_runtime_type_py3.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LinkedIntegrationRuntimeType(Model):
"""The base definition of a linked integration runtime.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: LinkedIntegrationRuntimeRbacAuthorization,
LinkedIntegrationRuntimeKeyAuthorization
All required parameters must be populated in order to send to Azure.
:param authorization_type: Required. Constant filled by server.
:type authorization_type: str
"""
_validation = {
'authorization_type': {'required': True},
}
_attribute_map = {
'authorization_type': {'key': 'authorizationType', 'type': 'str'},
}
_subtype_map = {
'authorization_type': {'RBAC': 'LinkedIntegrationRuntimeRbacAuthorization', 'Key': 'LinkedIntegrationRuntimeKeyAuthorization'}
}
def __init__(self, **kwargs) -> None:
super(LinkedIntegrationRuntimeType, self).__init__(**kwargs)
self.authorization_type = None
| 34.627907 | 134 | 0.652116 |
79468e5234c6ee7add40cf770e7aad871856b22f | 243 | py | Python | conanfile.py | bincrafters/conan-boost_chrono | 64f61589a92a062f956f3ce7dd6c21362552df77 | [
"MIT"
] | null | null | null | conanfile.py | bincrafters/conan-boost_chrono | 64f61589a92a062f956f3ce7dd6c21362552df77 | [
"MIT"
] | null | null | null | conanfile.py | bincrafters/conan-boost_chrono | 64f61589a92a062f956f3ce7dd6c21362552df77 | [
"MIT"
] | 2 | 2019-04-24T05:53:46.000Z | 2019-06-06T20:27:28.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import python_requires
base = python_requires("boost_base/2.0.0@bincrafters/testing")
class BoostChronoConan(base.BoostBaseConan):
name = "boost_chrono"
version = "1.70.0"
| 18.692308 | 62 | 0.707819 |
79468ede447c69937e3b5be5592caf8d1d42727a | 3,292 | py | Python | hfc/fabric_network/couchdbwalletstore.py | roviso/hyberledger-py | 908dd597e0822f99cf618f235dd517824ba44bc4 | [
"Apache-2.0"
] | 389 | 2016-09-18T11:50:10.000Z | 2022-03-29T21:45:40.000Z | hfc/fabric_network/couchdbwalletstore.py | roviso/hyberledger-py | 908dd597e0822f99cf618f235dd517824ba44bc4 | [
"Apache-2.0"
] | 112 | 2017-08-18T00:32:21.000Z | 2022-02-25T18:55:57.000Z | hfc/fabric_network/couchdbwalletstore.py | roviso/hyberledger-py | 908dd597e0822f99cf618f235dd517824ba44bc4 | [
"Apache-2.0"
] | 268 | 2016-10-12T02:56:58.000Z | 2022-03-30T09:50:54.000Z |
import couchdb
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from hfc.fabric_ca.caservice import Enrollment
from hfc.fabric.user import User
from hfc.fabric.user import validate
from hfc.util.crypto.crypto import ecies
class CouchDBWalletStore(object):
"""CouchDBWalletStore stores the identities of users and admins
in a CouchDB with given config
ie. it contains the Private Key and Enrollment Certificate
"""
def __init__(self, dbName, config='http://localhost:5984'):
self.server = couchdb.Server(config)
try:
self.db = self.server[dbName]
except Exception:
self.db = self.server.create(dbName)
def exists(self, enrollment_id):
"""Returns whether or not the creds of a user with a given user_id
exists in the wallet
:param enrollment_id: enrollment id
:return: True or False
"""
try:
self.db[enrollment_id]
return True
except Exception:
return False
def remove(self, enrollment_id):
"""deletes identities of user with given enrollment_id
:param enrollment_id: enrollment id
:return:
"""
self.db.delete(self.db[enrollment_id])
def put(self, enrollment_id, user_enrollment):
"""Saves the particular Identity in the wallet
:param enrollment_id: enrollment id
:param user_enrollment: Enrollment object
:return:
"""
if not isinstance(user_enrollment, Enrollment):
raise ValueError('"user_enrollment" is not a valid Enrollment object')
PrivateKey = user_enrollment.private_key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption()
).decode()
EnrollmentCert = user_enrollment.cert.decode()
doc = {'EnrollmentCert': EnrollmentCert, 'PrivateKey': PrivateKey}
self.db[enrollment_id] = doc
def create_user(self, enrollment_id, org, msp_id, state_store=None):
"""Returns an instance of a user whose identity
is stored in the CouchDBWallet
:param enrollment_id: enrollment id
:param org: organization
:param msp_id: MSP id
:param state_store: (Default value = None)
:return: a validated user instance
"""
crypto_suit = ecies()
if not self.exists(enrollment_id):
raise AttributeError('"user" does not exist')
key_pem = self.db[enrollment_id]['PrivateKey']
cert_pem = self.db[enrollment_id]['EnrollmentCert']
        # The PEM text is stored as str in CouchDB; load_pem_private_key expects bytes.
        private_key = load_pem_private_key(key_pem.encode(), None, default_backend())
enrollment = Enrollment(private_key, cert_pem)
user = User(enrollment_id, org, state_store)
user.enrollment = enrollment
user.msp_id = msp_id
user.cryptoSuite = crypto_suit
return validate(user)
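# Minimal usage sketch (illustrative only; the CouchDB URL, database name, enrollment id,
# organization and MSP id are assumptions, not values shipped with this module):
if __name__ == "__main__":
    wallet = CouchDBWalletStore("wallet", config="http://localhost:5984")
    if wallet.exists("admin"):
        user = wallet.create_user("admin", "org1.example.com", "Org1MSP")
        print(user)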
| 36.175824 | 112 | 0.635784 |
79468f059f4ce9c9298102b828700088c5c64b8a | 2,388 | py | Python | src/MOSIM/abstraction/access/remote/remote_adapter_client.py | dfki-asr/MMIPython-Core | 2f4b51ffde606c45661d9dbd5153576f919bdb8b | [
"MIT"
] | null | null | null | src/MOSIM/abstraction/access/remote/remote_adapter_client.py | dfki-asr/MMIPython-Core | 2f4b51ffde606c45661d9dbd5153576f919bdb8b | [
"MIT"
] | null | null | null | src/MOSIM/abstraction/access/remote/remote_adapter_client.py | dfki-asr/MMIPython-Core | 2f4b51ffde606c45661d9dbd5153576f919bdb8b | [
"MIT"
] | null | null | null |
## SPDX-License-Identifier: MIT
## The content of this file has been developed in the context of the MOSIM research project.
## Original author(s): Jannes Lehwald
# -*- coding: utf-8 -*-
"""
"""
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TCompactProtocol
from MOSIM.mmi.register.ttypes import MAdapterDescription
from MMIStandard import MMIAdapter  # re-enabled: MMIAdapter.Client() in __init__ requires it
from MOSIM.abstraction.access.interface.adapter_client import IAdapterClient
class RemoteAdapterClient(IAdapterClient):
"""
    A wrapper for an adapter client connection
Attributes
----------
_address : str
The address of the service
_port : str
The port of the service
_transport : TTransport
The thrift transport
_acces : MMIAdapter.Iface
The actual access
"""
def __init__(self, address, port):
"""
        Constructor which needs an address and a port.
Parameters
----------
address : str
The address of the service
port : str
The port of the service
"""
assert(isinstance(address, str)), "The address is no string"
assert(isinstance(port, int)), "The port is no int"
super(RemoteAdapterClient, self).__init__()
if address == "127.0.0.1":
address = "localhost"
self._address = address
self._port = port
try:
self._transport = TSocket.TSocket(host=self._address, port=self._port)
self._transport = TTransport.TBufferedTransport(self._transport)
protocol = TCompactProtocol.TCompactProtocol(self._transport)
self._access = MMIAdapter.Client(protocol)
self._transport.open()
            print('Connected to adapter {0}:{1}'.format(address, port))
except:
            print('Could not connect to adapter {0}:{1}'.format(address, port))
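    # Usage sketch (illustrative; the host and port below are assumptions, not defaults
    # of this class):
    #   client = RemoteAdapterClient("localhost", 8900)
    #   ...issue MMIAdapter calls through the opened thrift connection...
    #   client.dispose()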
def dispose(self):
"""
Closes the connection to the adapter.
"""
super(RemoteAdapterClient, self).dispose()
try:
self._transport.close()
except:
            print('Could not close connection to adapter {0}:{1}'.format(self._address, self._port))
| 28.771084 | 100 | 0.600503 |
79468fe34b85be16d384aba1e89267bc3024e813 | 1,262 | py | Python | wavesim/tests/python/test_BuildInfo.py | TheComet/wavesim | 90125d5273b96633e5f74666ddb707cedfa2fbf1 | [
"Apache-2.0"
] | 7 | 2018-01-25T10:58:39.000Z | 2021-05-08T08:08:37.000Z | wavesim/tests/python/test_BuildInfo.py | TheComet/wavesim | 90125d5273b96633e5f74666ddb707cedfa2fbf1 | [
"Apache-2.0"
] | 4 | 2018-03-06T15:47:13.000Z | 2018-03-07T19:07:45.000Z | wavesim/tests/python/test_BuildInfo.py | TheComet/wavesim | 90125d5273b96633e5f74666ddb707cedfa2fbf1 | [
"Apache-2.0"
] | 2 | 2018-02-18T02:02:31.000Z | 2020-02-16T09:49:12.000Z |
import wavesim
import unittest
class TestBuildInfo(unittest.TestCase):
def test_build_info(self):
self.assertTrue(isinstance(wavesim.build_info, str))
self.assertTrue(len(wavesim.build_info) > 10)
def test_build_number(self):
self.assertTrue(isinstance(wavesim.build_number, int))
self.assertTrue(wavesim.build_number > 0)
def test_build_host(self):
self.assertTrue(isinstance(wavesim.build_host, str))
self.assertTrue(len(wavesim.build_info) > 10)
def test_build_time(self):
from datetime import datetime
self.assertTrue(isinstance(wavesim.build_time, str))
try:
datetime.strptime(wavesim.build_time, "%Y-%m-%dT%H:%M:%SZ")
except:
self.fail()
def test_commit_info(self):
self.assertTrue(isinstance(wavesim.commit_info, str))
self.assertTrue(len(wavesim.commit_info) > 10)
def test_compiler_info(self):
self.assertTrue(isinstance(wavesim.compiler_info, str))
self.assertTrue(len(wavesim.commit_info) > 1)
def test_cmake_configuration(self):
self.assertTrue(isinstance(wavesim.cmake_configuration, str))
self.assertTrue(len(wavesim.cmake_configuration) > 10)
unittest.main()
| 33.210526 | 71 | 0.691759 |
7946905946aa974b9e22a3fdc9143d26f75693e1 | 4,282 | py | Python | nipyapi/nifi/models/variable_entity.py | Paul-Verardi/nipyapi | 7a709611d9cf30e4ce8943db4d4dd617f2f7c81c | [
"Apache-2.0"
] | null | null | null | nipyapi/nifi/models/variable_entity.py | Paul-Verardi/nipyapi | 7a709611d9cf30e4ce8943db4d4dd617f2f7c81c | [
"Apache-2.0"
] | 1 | 2018-11-13T21:01:33.000Z | 2018-11-13T21:01:33.000Z | nipyapi/nifi/models/variable_entity.py | Paul-Verardi/nipyapi | 7a709611d9cf30e4ce8943db4d4dd617f2f7c81c | [
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class VariableEntity(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'variable': 'VariableDTO',
'can_write': 'bool'
}
attribute_map = {
'variable': 'variable',
'can_write': 'canWrite'
}
def __init__(self, variable=None, can_write=None):
"""
VariableEntity - a model defined in Swagger
"""
self._variable = None
self._can_write = None
if variable is not None:
self.variable = variable
if can_write is not None:
self.can_write = can_write
@property
def variable(self):
"""
Gets the variable of this VariableEntity.
The variable information
:return: The variable of this VariableEntity.
:rtype: VariableDTO
"""
return self._variable
@variable.setter
def variable(self, variable):
"""
Sets the variable of this VariableEntity.
The variable information
:param variable: The variable of this VariableEntity.
:type: VariableDTO
"""
self._variable = variable
@property
def can_write(self):
"""
Gets the can_write of this VariableEntity.
Indicates whether the user can write a given resource.
:return: The can_write of this VariableEntity.
:rtype: bool
"""
return self._can_write
@can_write.setter
def can_write(self, can_write):
"""
Sets the can_write of this VariableEntity.
Indicates whether the user can write a given resource.
:param can_write: The can_write of this VariableEntity.
:type: bool
"""
self._can_write = can_write
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, VariableEntity):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
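# Illustrative sketch (not part of the generated module): the entity wraps a VariableDTO
# plus a permission flag and serializes through to_dict(), e.g.
#   entity = VariableEntity(variable=some_variable_dto, can_write=True)
#   entity.to_dict()   # -> {'variable': {...}, 'can_write': True}
# where "some_variable_dto" is assumed to be an existing VariableDTO instance.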
| 27.805195 | 479 | 0.555348 |
794690ddb0b17b33d14c802e813d1a6df5666d75 | 15,837 | py | Python | venv/lib/python3.9/site-packages/_pytest/runner.py | almmello/frozen | c9928491f694b56a0023926bc763c703ba1fd75a | [
"BSD-2-Clause"
] | 9 | 2019-05-29T23:50:28.000Z | 2021-01-29T20:51:05.000Z | venv/lib/python3.9/site-packages/_pytest/runner.py | almmello/frozen | c9928491f694b56a0023926bc763c703ba1fd75a | [
"BSD-2-Clause"
] | 61 | 2020-10-12T13:34:56.000Z | 2022-03-28T03:02:21.000Z | venv/lib/python3.9/site-packages/_pytest/runner.py | almmello/frozen | c9928491f694b56a0023926bc763c703ba1fd75a | [
"BSD-2-Clause"
] | 3 | 2020-05-25T02:38:08.000Z | 2021-01-20T06:23:06.000Z |
"""Basic collect and runtest protocol implementations."""
import bdb
import os
import sys
from typing import Callable
from typing import cast
from typing import Dict
from typing import Generic
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
import attr
from .reports import BaseReport
from .reports import CollectErrorRepr
from .reports import CollectReport
from .reports import TestReport
from _pytest import timing
from _pytest._code.code import ExceptionChainRepr
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import TerminalRepr
from _pytest.compat import final
from _pytest.config.argparsing import Parser
from _pytest.nodes import Collector
from _pytest.nodes import Item
from _pytest.nodes import Node
from _pytest.outcomes import Exit
from _pytest.outcomes import Skipped
from _pytest.outcomes import TEST_OUTCOME
if TYPE_CHECKING:
from typing_extensions import Literal
from _pytest.main import Session
from _pytest.terminal import TerminalReporter
#
# pytest plugin hooks.
def pytest_addoption(parser: Parser) -> None:
group = parser.getgroup("terminal reporting", "reporting", after="general")
group.addoption(
"--durations",
action="store",
type=int,
default=None,
metavar="N",
help="show N slowest setup/test durations (N=0 for all).",
)
group.addoption(
"--durations-min",
action="store",
type=float,
default=0.005,
metavar="N",
help="Minimal duration in seconds for inclusion in slowest list. Default 0.005",
)
def pytest_terminal_summary(terminalreporter: "TerminalReporter") -> None:
durations = terminalreporter.config.option.durations
durations_min = terminalreporter.config.option.durations_min
verbose = terminalreporter.config.getvalue("verbose")
if durations is None:
return
tr = terminalreporter
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if not dlist:
return
dlist.sort(key=lambda x: x.duration, reverse=True) # type: ignore[no-any-return]
if not durations:
tr.write_sep("=", "slowest durations")
else:
tr.write_sep("=", "slowest %s durations" % durations)
dlist = dlist[:durations]
for i, rep in enumerate(dlist):
if verbose < 2 and rep.duration < durations_min:
tr.write_line("")
tr.write_line(
"(%s durations < %gs hidden. Use -vv to show these durations.)"
% (len(dlist) - i, durations_min)
)
break
tr.write_line(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}")
def pytest_sessionstart(session: "Session") -> None:
session._setupstate = SetupState()
def pytest_sessionfinish(session: "Session") -> None:
session._setupstate.teardown_all()
def pytest_runtest_protocol(item: Item, nextitem: Optional[Item]) -> bool:
ihook = item.ihook
ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
runtestprotocol(item, nextitem=nextitem)
ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
return True
def runtestprotocol(
item: Item, log: bool = True, nextitem: Optional[Item] = None
) -> List[TestReport]:
hasrequest = hasattr(item, "_request")
if hasrequest and not item._request: # type: ignore[attr-defined]
item._initrequest() # type: ignore[attr-defined]
rep = call_and_report(item, "setup", log)
reports = [rep]
if rep.passed:
if item.config.getoption("setupshow", False):
show_test_item(item)
if not item.config.getoption("setuponly", False):
reports.append(call_and_report(item, "call", log))
reports.append(call_and_report(item, "teardown", log, nextitem=nextitem))
# After all teardown hooks have been called
# want funcargs and request info to go away.
if hasrequest:
item._request = False # type: ignore[attr-defined]
item.funcargs = None # type: ignore[attr-defined]
return reports
def show_test_item(item: Item) -> None:
"""Show test function, parameters and the fixtures of the test item."""
tw = item.config.get_terminal_writer()
tw.line()
tw.write(" " * 8)
tw.write(item.nodeid)
used_fixtures = sorted(getattr(item, "fixturenames", []))
if used_fixtures:
tw.write(" (fixtures used: {})".format(", ".join(used_fixtures)))
tw.flush()
def pytest_runtest_setup(item: Item) -> None:
_update_current_test_var(item, "setup")
item.session._setupstate.prepare(item)
def pytest_runtest_call(item: Item) -> None:
_update_current_test_var(item, "call")
try:
del sys.last_type
del sys.last_value
del sys.last_traceback
except AttributeError:
pass
try:
item.runtest()
except Exception as e:
# Store trace info to allow postmortem debugging
sys.last_type = type(e)
sys.last_value = e
assert e.__traceback__ is not None
# Skip *this* frame
sys.last_traceback = e.__traceback__.tb_next
raise e
def pytest_runtest_teardown(item: Item, nextitem: Optional[Item]) -> None:
_update_current_test_var(item, "teardown")
item.session._setupstate.teardown_exact(item, nextitem)
_update_current_test_var(item, None)
def _update_current_test_var(
item: Item, when: Optional["Literal['setup', 'call', 'teardown']"]
) -> None:
"""Update :envvar:`PYTEST_CURRENT_TEST` to reflect the current item and stage.
If ``when`` is None, delete ``PYTEST_CURRENT_TEST`` from the environment.
"""
var_name = "PYTEST_CURRENT_TEST"
if when:
value = f"{item.nodeid} ({when})"
# don't allow null bytes on environment variables (see #2644, #2957)
value = value.replace("\x00", "(null)")
os.environ[var_name] = value
else:
os.environ.pop(var_name)
def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
if report.when in ("setup", "teardown"):
if report.failed:
# category, shortletter, verbose-word
return "error", "E", "ERROR"
elif report.skipped:
return "skipped", "s", "SKIPPED"
else:
return "", "", ""
return None
#
# Implementation
def call_and_report(
item: Item, when: "Literal['setup', 'call', 'teardown']", log: bool = True, **kwds
) -> TestReport:
call = call_runtest_hook(item, when, **kwds)
hook = item.ihook
report: TestReport = hook.pytest_runtest_makereport(item=item, call=call)
if log:
hook.pytest_runtest_logreport(report=report)
if check_interactive_exception(call, report):
hook.pytest_exception_interact(node=item, call=call, report=report)
return report
def check_interactive_exception(call: "CallInfo[object]", report: BaseReport) -> bool:
"""Check whether the call raised an exception that should be reported as
interactive."""
if call.excinfo is None:
# Didn't raise.
return False
if hasattr(report, "wasxfail"):
# Exception was expected.
return False
if isinstance(call.excinfo.value, (Skipped, bdb.BdbQuit)):
# Special control flow exception.
return False
return True
def call_runtest_hook(
item: Item, when: "Literal['setup', 'call', 'teardown']", **kwds
) -> "CallInfo[None]":
if when == "setup":
ihook: Callable[..., None] = item.ihook.pytest_runtest_setup
elif when == "call":
ihook = item.ihook.pytest_runtest_call
elif when == "teardown":
ihook = item.ihook.pytest_runtest_teardown
else:
assert False, f"Unhandled runtest hook case: {when}"
reraise: Tuple[Type[BaseException], ...] = (Exit,)
if not item.config.getoption("usepdb", False):
reraise += (KeyboardInterrupt,)
return CallInfo.from_call(
lambda: ihook(item=item, **kwds), when=when, reraise=reraise
)
TResult = TypeVar("TResult", covariant=True)
@final
@attr.s(repr=False)
class CallInfo(Generic[TResult]):
"""Result/Exception info a function invocation.
:param T result:
The return value of the call, if it didn't raise. Can only be
accessed if excinfo is None.
:param Optional[ExceptionInfo] excinfo:
The captured exception of the call, if it raised.
:param float start:
The system time when the call started, in seconds since the epoch.
:param float stop:
The system time when the call ended, in seconds since the epoch.
:param float duration:
The call duration, in seconds.
:param str when:
The context of invocation: "setup", "call", "teardown", ...
"""
_result = attr.ib(type="Optional[TResult]")
excinfo = attr.ib(type=Optional[ExceptionInfo[BaseException]])
start = attr.ib(type=float)
stop = attr.ib(type=float)
duration = attr.ib(type=float)
when = attr.ib(type="Literal['collect', 'setup', 'call', 'teardown']")
@property
def result(self) -> TResult:
if self.excinfo is not None:
raise AttributeError(f"{self!r} has no valid result")
# The cast is safe because an exception wasn't raised, hence
# _result has the expected function return type (which may be
# None, that's why a cast and not an assert).
return cast(TResult, self._result)
@classmethod
def from_call(
cls,
func: "Callable[[], TResult]",
when: "Literal['collect', 'setup', 'call', 'teardown']",
reraise: Optional[
Union[Type[BaseException], Tuple[Type[BaseException], ...]]
] = None,
) -> "CallInfo[TResult]":
excinfo = None
start = timing.time()
precise_start = timing.perf_counter()
try:
result: Optional[TResult] = func()
except BaseException:
excinfo = ExceptionInfo.from_current()
if reraise is not None and isinstance(excinfo.value, reraise):
raise
result = None
# use the perf counter
precise_stop = timing.perf_counter()
duration = precise_stop - precise_start
stop = timing.time()
return cls(
start=start,
stop=stop,
duration=duration,
when=when,
result=result,
excinfo=excinfo,
)
def __repr__(self) -> str:
if self.excinfo is None:
return f"<CallInfo when={self.when!r} result: {self._result!r}>"
return f"<CallInfo when={self.when!r} excinfo={self.excinfo!r}>"
def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport:
return TestReport.from_item_and_call(item, call)
def pytest_make_collect_report(collector: Collector) -> CollectReport:
call = CallInfo.from_call(lambda: list(collector.collect()), "collect")
longrepr: Union[None, Tuple[str, int, str], str, TerminalRepr] = None
if not call.excinfo:
outcome: Literal["passed", "skipped", "failed"] = "passed"
else:
skip_exceptions = [Skipped]
unittest = sys.modules.get("unittest")
if unittest is not None:
# Type ignored because unittest is loaded dynamically.
skip_exceptions.append(unittest.SkipTest) # type: ignore
if isinstance(call.excinfo.value, tuple(skip_exceptions)):
outcome = "skipped"
r_ = collector._repr_failure_py(call.excinfo, "line")
assert isinstance(r_, ExceptionChainRepr), repr(r_)
r = r_.reprcrash
assert r
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
errorinfo = collector.repr_failure(call.excinfo)
if not hasattr(errorinfo, "toterminal"):
assert isinstance(errorinfo, str)
errorinfo = CollectErrorRepr(errorinfo)
longrepr = errorinfo
result = call.result if not call.excinfo else None
rep = CollectReport(collector.nodeid, outcome, longrepr, result)
rep.call = call # type: ignore # see collect_one_node
return rep
class SetupState:
"""Shared state for setting up/tearing down test items or collectors."""
def __init__(self):
self.stack: List[Node] = []
self._finalizers: Dict[Node, List[Callable[[], object]]] = {}
def addfinalizer(self, finalizer: Callable[[], object], colitem) -> None:
"""Attach a finalizer to the given colitem."""
assert colitem and not isinstance(colitem, tuple)
assert callable(finalizer)
# assert colitem in self.stack # some unit tests don't setup stack :/
self._finalizers.setdefault(colitem, []).append(finalizer)
def _pop_and_teardown(self):
colitem = self.stack.pop()
self._teardown_with_finalization(colitem)
def _callfinalizers(self, colitem) -> None:
finalizers = self._finalizers.pop(colitem, None)
exc = None
while finalizers:
fin = finalizers.pop()
try:
fin()
except TEST_OUTCOME as e:
# XXX Only first exception will be seen by user,
# ideally all should be reported.
if exc is None:
exc = e
if exc:
raise exc
def _teardown_with_finalization(self, colitem) -> None:
self._callfinalizers(colitem)
colitem.teardown()
for colitem in self._finalizers:
assert colitem in self.stack
def teardown_all(self) -> None:
while self.stack:
self._pop_and_teardown()
for key in list(self._finalizers):
self._teardown_with_finalization(key)
assert not self._finalizers
def teardown_exact(self, item, nextitem) -> None:
needed_collectors = nextitem and nextitem.listchain() or []
self._teardown_towards(needed_collectors)
def _teardown_towards(self, needed_collectors) -> None:
exc = None
while self.stack:
if self.stack == needed_collectors[: len(self.stack)]:
break
try:
self._pop_and_teardown()
except TEST_OUTCOME as e:
# XXX Only first exception will be seen by user,
# ideally all should be reported.
if exc is None:
exc = e
if exc:
raise exc
def prepare(self, colitem) -> None:
"""Setup objects along the collector chain to the test-method."""
# Check if the last collection node has raised an error.
for col in self.stack:
if hasattr(col, "_prepare_exc"):
exc = col._prepare_exc # type: ignore[attr-defined]
raise exc
needed_collectors = colitem.listchain()
for col in needed_collectors[len(self.stack) :]:
self.stack.append(col)
try:
col.setup()
except TEST_OUTCOME as e:
col._prepare_exc = e # type: ignore[attr-defined]
raise e
def collect_one_node(collector: Collector) -> CollectReport:
ihook = collector.ihook
ihook.pytest_collectstart(collector=collector)
rep: CollectReport = ihook.pytest_make_collect_report(collector=collector)
call = rep.__dict__.pop("call", None)
if call and check_interactive_exception(call, rep):
ihook.pytest_exception_interact(node=collector, call=call, report=rep)
return rep
| 34.205184 | 88 | 0.640525 |
7946912b09114108ab46ea2f12e00357ccfa4bdf | 2,335 | py | Python | tests/test_core.py | quippp/twilio-python | 22b84cdfd19a6b1bde84350053870a7c507af410 | [
"MIT"
] | 11 | 2016-01-23T04:38:23.000Z | 2017-11-19T04:03:25.000Z | venv/lib/python2.7/site-packages/tests/test_core.py | jideobs/twilioAngular | eb95308d287d7dbb72fe516a633199a0af8b76b9 | [
"MIT"
] | 1 | 2016-02-29T01:35:27.000Z | 2016-02-29T01:35:27.000Z | venv/lib/python2.7/site-packages/tests/test_core.py | jideobs/twilioAngular | eb95308d287d7dbb72fe516a633199a0af8b76b9 | [
"MIT"
] | 2 | 2019-05-19T06:02:26.000Z | 2020-12-23T11:27:20.000Z |
from datetime import datetime
from datetime import date
from nose.tools import assert_equal
from twilio.rest.resources import parse_date
from twilio.rest.resources import transform_params
from twilio.rest.resources import convert_keys
from twilio.rest.resources import convert_case
from twilio.rest.resources import convert_boolean
from twilio.rest.resources import normalize_dates
def test_date():
d = date(2009, 10, 10)
assert_equal(parse_date(d), "2009-10-10")
def test_datetime():
d = datetime(2009, 10, 10)
assert_equal(parse_date(d), "2009-10-10")
def test_string_date():
d = "2009-10-10"
assert_equal(parse_date(d), "2009-10-10")
def test_string_date_none():
d = None
assert_equal(parse_date(d), None)
def test_string_date_false():
d = False
assert_equal(parse_date(d), None)
def test_fparam():
d = {"HEY": None, "YOU": 3}
ed = {"YOU": 3}
assert_equal(transform_params(d), ed)
def test_multi_param():
d = {"Normal": 3, "Multiple": ["One", "Two"]}
ed = {"Normal": 3, "Multiple": ["One", "Two"]}
assert_equal(transform_params(d), ed)
def test_fparam_booleans():
d = {"HEY": None, "YOU": 3, "Activated": False}
ed = {"YOU": 3, "Activated": "false"}
assert_equal(transform_params(d), ed)
def test_normalize_dates():
@normalize_dates
def foo(on=None, before=None, after=None):
return {
"on": on,
"before": before,
"after": after,
}
d = foo(on="2009-10-10", before=date(2009, 10, 10),
after=datetime(2009, 10, 10))
assert_equal(d["on"], "2009-10-10")
assert_equal(d["after"], "2009-10-10")
assert_equal(d["before"], "2009-10-10")
def test_convert_case():
assert_equal(convert_case("from_"), "From")
assert_equal(convert_case("to"), "To")
assert_equal(convert_case("friendly_name"), "FriendlyName")
def test_convert_bool():
assert_equal(convert_boolean(False), "false")
assert_equal(convert_boolean(True), "true")
assert_equal(convert_boolean(1), 1)
def test_convert_keys():
d = {
"from_": 0,
"to": 0,
"friendly_name": 0,
"ended": 0,
}
ed = {
"From": 0,
"To": 0,
"FriendlyName": 0,
"EndTime": 0,
}
assert_equal(ed, convert_keys(d))
| 22.669903 | 63 | 0.630835 |
79469383dca3782f16702cf74ee8e5be92ff6c87 | 15,949 | py | Python | contrib/client-side/svnmerge/svnmerge-migrate-history.py | YueLinHo/Subversion | b28ce23d2e0109f0ca64b9f2a46be0fa23820f5f | [
"Apache-2.0"
] | 1 | 2018-03-09T01:54:29.000Z | 2018-03-09T01:54:29.000Z | contrib/client-side/svnmerge/svnmerge-migrate-history.py | whiplashcn/subversion | da9560b9b82437f9a8be30165dc110ce708b29bc | [
"Apache-2.0"
] | null | null | null | contrib/client-side/svnmerge/svnmerge-migrate-history.py | whiplashcn/subversion | da9560b9b82437f9a8be30165dc110ce708b29bc | [
"Apache-2.0"
] | 2 | 2017-04-24T23:04:44.000Z | 2020-11-04T07:27:17.000Z |
#!/usr/bin/env python
#
# svnmerge-migrate-history.py: Migrate merge history from svnmerge.py's
# format to Subversion 1.5's format.
#
# ====================================================================
# Copyright (c) 2007-2009 CollabNet. All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://subversion.tigris.org/license-1.html.
# If newer versions of this license are posted there, you may use a
# newer version instead, at your option.
#
# This software consists of voluntary contributions made by many
# individuals. For exact contribution history, see the revision
# history and logs, available at http://subversion.tigris.org/.
# ====================================================================
# $HeadURL$
# $LastChangedDate$
# $LastChangedBy$
# $LastChangedRevision$
import warnings
warnings.filterwarnings('ignore', '.*', DeprecationWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
import sys
import os
import sre
import getopt
import urllib
try:
my_getopt = getopt.gnu_getopt
except AttributeError:
my_getopt = getopt.getopt
try:
import svn.core
import svn.fs
import svn.repos
except ImportError as e:
sys.stderr.write(\
"ERROR: Unable to import Subversion's Python bindings: '%s'\n" \
"Hint: Set your PYTHONPATH environment variable, or adjust your " \
"PYTHONSTARTUP\nfile to point to your Subversion install " \
"location's svn-python directory.\n" % e)
sys.exit(1)
# Convenience shortcut.
mergeinfo2str = svn.core.svn_mergeinfo_to_string
# Pretend we have boolean data types for older Python versions.
try:
True
False
except:
True = 1
False = 0
def usage_and_exit(error_msg=None):
"""Write usage information and exit. If ERROR_MSG is provide, that
error message is printed first (to stderr), the usage info goes to
stderr, and the script exits with a non-zero status. Otherwise,
usage info goes to stdout and the script exits with a zero status."""
progname = os.path.basename(sys.argv[0])
stream = error_msg and sys.stderr or sys.stdout
if error_msg:
stream.write("ERROR: %s\n\n" % error_msg)
stream.write("""Usage: %s REPOS_PATH [PATH_PREFIX...] [OPTIONS]
%s --help
Migrate merge history from svnmerge.py's format to Subversion 1.5's
format, stopping as soon as merge history is encountered for a
directory tree.
PATH_PREFIX defines the repository paths to examine for merge history
to migrate. If none are listed, the repository's root is examined.
Options:
--help (-h, -?) Show this usage message.
--dry-run Don't actually commit the results of the migration.
--naive-mode Perform naive (faster, less accurate) migration.
--verbose (-v) Show more informative output.
Example:
%s /path/to/repos trunk branches tags
""" % (progname, progname, progname))
sys.exit(error_msg and 1 or 0)
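# Worked example of the conversion performed below (illustrative; the branch path and
# revision range are placeholders, not values from this script):
#   svnmerge-integrated  ->  "/branches/feature:1-30"
#   svn:mergeinfo        ->  "/branches/feature:1-30"
# with the target's own natural history subtracted from the result unless --naive-mode
# is given.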
class Migrator:
"Migrates merge history."
def __init__(self):
self.repos_path = None
self.path_prefixes = None
self.verbose = False
self.dry_run = False
self.naive_mode = False
self.fs = None
def log(self, message, only_when_verbose=True):
if only_when_verbose and not self.verbose:
return
print(message)
def run(self):
self.repos = svn.repos.open(self.repos_path)
self.fs = svn.repos.fs(self.repos)
revnum = svn.fs.youngest_rev(self.fs)
root = svn.fs.revision_root(self.fs, revnum)
# Validate path prefixes, retaining path calculations performed in
# the process.
leading_paths = []
for path_prefix in self.path_prefixes:
path = "/".join(path_prefix[:-1])
leading_paths.append(path)
if svn.fs.check_path(root, path) != svn.core.svn_node_dir:
raise Exception("Repository path '%s' is not a directory" % path)
for i in range(0, len(self.path_prefixes)):
prefix = self.path_prefixes[i]
self.process_dir(root, revnum, leading_paths[i],
prefix[len(prefix) - 1] + ".*")
def flatten_prop(self, propval):
return '\\n'.join(propval.split('\n'))
def process_dir(self, root, revnum, dir_path, pattern=None):
"Recursively process children of DIR_PATH."
dirents = svn.fs.dir_entries(root, dir_path)
for name in dirents.keys():
if not dirents[name].kind == svn.core.svn_node_dir:
continue
if pattern is None or sre.match(pattern, name):
if dir_path == "":
child_path = name
else:
child_path = "%s/%s" % (dir_path, name)
self.log("Examining path '%s' for conversion" % (child_path))
if not self.convert_path_history(root, revnum, child_path):
self.process_dir(root, revnum, child_path)
def convert_path_history(self, root, revnum, path):
"Migrate the merge history for PATH at ROOT at REVNUM."
### Bother to handle any pre-existing, inherited svn:mergeinfo?
# Retrieve existing Subversion 1.5 mergeinfo.
mergeinfo_prop_val = svn.fs.node_prop(root, path,
svn.core.SVN_PROP_MERGEINFO)
if mergeinfo_prop_val is not None:
self.log("Discovered pre-existing Subversion mergeinfo of '%s'" \
% (self.flatten_prop(mergeinfo_prop_val)))
# Retrieve svnmerge.py's merge history meta data, and roll it into
# Subversion 1.5 mergeinfo.
integrated_prop_val = svn.fs.node_prop(root, path, "svnmerge-integrated")
if integrated_prop_val is not None:
self.log("Discovered svnmerge.py mergeinfo of '%s'" \
% (self.flatten_prop(integrated_prop_val)))
### LATER: We handle svnmerge-blocked by converting it into
### svn:mergeinfo, until revision blocking becomes available in
### Subversion's core.
blocked_prop_val = svn.fs.node_prop(root, path, "svnmerge-blocked")
if blocked_prop_val is not None:
self.log("Discovered svnmerge.py blocked revisions of '%s'" \
% (self.flatten_prop(blocked_prop_val)))
# Convert property values into real mergeinfo structures.
svn_mergeinfo = None
if mergeinfo_prop_val is not None:
svn_mergeinfo = svn.core.svn_mergeinfo_parse(mergeinfo_prop_val)
integrated_mergeinfo = self.svnmerge_prop_to_mergeinfo(integrated_prop_val)
blocked_mergeinfo = self.svnmerge_prop_to_mergeinfo(blocked_prop_val)
# Add our various bits of stored mergeinfo together.
new_mergeinfo = self.mergeinfo_merge(svn_mergeinfo, integrated_mergeinfo)
new_mergeinfo = self.mergeinfo_merge(new_mergeinfo, blocked_mergeinfo)
if new_mergeinfo is not None:
self.log("Combined mergeinfo is '%s'" \
% (self.flatten_prop(mergeinfo2str(new_mergeinfo))))
# Unless we're doing a naive migration (or we've no, or only
# empty, mergeinfo anyway), start trying to cleanup after
# svnmerge.py's history-ignorant initialization.
if not self.naive_mode and new_mergeinfo:
# We begin by subtracting the natural history of the merge
# target from its own mergeinfo.
rev = svn.fs.revision_root_revision(root)
implicit_mergeinfo = self.get_natural_history(path, rev)
self.log("Subtracting natural mergeinfo of '%s'" \
% (self.flatten_prop(mergeinfo2str(implicit_mergeinfo))))
new_mergeinfo = svn.core.svn_mergeinfo_remove(implicit_mergeinfo,
new_mergeinfo)
self.log("Remaining mergeinfo is '%s'" \
% (self.flatten_prop(mergeinfo2str(new_mergeinfo))))
# Unfortunately, svnmerge.py tends to initialize using oft-bogus
# revision ranges like 1-SOMETHING when the merge source didn't
# even exist in r1. So if the natural history of a branch
# begins in some revision other than r1, there's still going to
# be cruft revisions left in NEW_MERGEINFO after subtracting the
# natural history. So, we also examine the natural history of
# the merge sources, and use that as a filter for the explicit
# mergeinfo we've calculated so far.
self.log("Filtering mergeinfo by reconstruction from source history ...")
filtered_mergeinfo = {}
for source_path, ranges in new_mergeinfo.items():
### If by some chance it is the case that /path:RANGE1 and
### /path:RANGE2 a) represent different lines of history, and
### b) were combined into /path:RANGE1+RANGE2 (due to the
### ranges being contiguous), we'll foul this up. But the
### chances are preeeeeeeetty slim.
for range in ranges:
try:
source_history = self.get_natural_history(source_path,
range.end,
range.start + 1)
self.log("... adding '%s'" \
% (self.flatten_prop(mergeinfo2str(source_history))))
filtered_mergeinfo = \
svn.core.svn_mergeinfo_merge(filtered_mergeinfo,
source_history)
except svn.core.SubversionException as e:
if not (e.apr_err == svn.core.SVN_ERR_FS_NOT_FOUND
or e.apr_err == svn.core.SVN_ERR_FS_NO_SUCH_REVISION):
raise
self.log("... done.")
new_mergeinfo = filtered_mergeinfo
# Turn our to-be-written mergeinfo back into a property value.
new_mergeinfo_prop_val = None
if new_mergeinfo is not None:
new_mergeinfo_prop_val = mergeinfo2str(new_mergeinfo)
# If we need to change the value of the svn:mergeinfo property or
# delete any svnmerge-* properties, let's do so.
if (new_mergeinfo_prop_val != mergeinfo_prop_val) \
or (integrated_prop_val is not None) \
or (blocked_prop_val is not None):
# If this not a dry-run, begin a transaction in which we'll
# manipulate merge-related properties. Open the transaction root.
if not self.dry_run:
txn = svn.fs.begin_txn2(self.fs, revnum, 0)
root = svn.fs.txn_root(txn)
# Manipulate the merge history.
if new_mergeinfo_prop_val != mergeinfo_prop_val:
# Run the final version of the new svn:mergeinfo through the
# parser to ensure it is in canonical form, e.g. no overlapping
# or unordered rangelists, see
# http://subversion.tigris.org/issues/show_bug.cgi?id=3302.
mergeinfo = svn.core.svn_mergeinfo_parse(new_mergeinfo_prop_val)
new_mergeinfo_prop_val = mergeinfo2str(mergeinfo)
self.log("Queuing change of %s to '%s'"
% (svn.core.SVN_PROP_MERGEINFO,
self.flatten_prop(new_mergeinfo_prop_val)))
if not self.dry_run:
svn.fs.change_node_prop(root, path, svn.core.SVN_PROP_MERGEINFO,
new_mergeinfo_prop_val)
# Remove old property values.
if integrated_prop_val is not None:
self.log("Queuing removal of svnmerge-integrated")
if not self.dry_run:
svn.fs.change_node_prop(root, path, "svnmerge-integrated", None)
if blocked_prop_val is not None:
self.log("Queuing removal of svnmerge-blocked")
if not self.dry_run:
svn.fs.change_node_prop(root, path, "svnmerge-blocked", None)
# Commit the transaction containing our property manipulation.
self.log("Committing the transaction containing the above changes")
if not self.dry_run:
conflict, new_revnum = svn.fs.commit_txn(txn)
if conflict:
raise Exception("Conflict encountered (%s)" % conflict)
self.log("Migrated merge history on '%s' in r%d"
% (path, new_revnum), False)
else:
self.log("Migrated merge history on '%s' in r???" % (path), False)
return True
else:
# No merge history to manipulate.
self.log("No merge history on '%s'" % (path))
return False
def svnmerge_prop_to_mergeinfo(self, svnmerge_prop_val):
"""Parse svnmerge-* property value SVNMERGE_PROP_VAL (which uses
any whitespace for delimiting sources and stores source paths
URI-encoded) into Subversion mergeinfo."""
if svnmerge_prop_val is None:
return None
# First we convert the svnmerge prop value into an svn:mergeinfo
# prop value, then we parse it into mergeinfo.
sources = svnmerge_prop_val.split()
svnmerge_prop_val = ''
for source in sources:
pieces = source.split(':')
if not (len(pieces) == 2 and pieces[1]):
continue
pieces[0] = urllib.unquote(pieces[0])
svnmerge_prop_val = svnmerge_prop_val + '%s\n' % (':'.join(pieces))
return svn.core.svn_mergeinfo_parse(svnmerge_prop_val or '')
def mergeinfo_merge(self, mergeinfo1, mergeinfo2):
"""Like svn.core.svn_mergeinfo_merge(), but preserves None-ness."""
if mergeinfo1 is None and mergeinfo2 is None:
return None
if mergeinfo1 is None:
return mergeinfo2
if mergeinfo2 is None:
return mergeinfo1
return svn.core.svn_mergeinfo_merge(mergeinfo1, mergeinfo2)
def get_natural_history(self, path, rev,
oldest_rev=svn.core.SVN_INVALID_REVNUM):
"""Return the natural history of PATH in REV, between OLDEST_REV
and REV, as mergeinfo. If OLDEST_REV is svn.core.SVN_INVALID_REVNUM,
all of PATH's history prior to REV will be returned.
(Adapted from Subversion's svn_client__get_history_as_mergeinfo().)"""
location_segments = []
def _allow_all(root, path, pool):
return 1
def _segment_receiver(segment, pool):
location_segments.append(segment)
svn.repos.node_location_segments(self.repos, path, rev, rev, oldest_rev,
_segment_receiver, _allow_all)
# Ensure oldest-to-youngest ordering of revision ranges.
location_segments.sort(lambda a, b: cmp(a.range_start, b.range_start))
# Translate location segments into merge sources and ranges.
mergeinfo = {}
for segment in location_segments:
if segment.path is None:
continue
source_path = '/' + segment.path
path_ranges = mergeinfo.get(source_path, [])
range = svn.core.svn_merge_range_t()
range.start = max(segment.range_start - 1, 0)
range.end = segment.range_end
range.inheritable = 1
path_ranges.append(range)
mergeinfo[source_path] = path_ranges
return mergeinfo
def set_path_prefixes(self, prefixes):
"Decompose path prefixes into something meaningful for comparison."
self.path_prefixes = []
for prefix in prefixes:
prefix_components = []
parts = prefix.split("/")
for i in range(0, len(parts)):
prefix_components.append(parts[i])
self.path_prefixes.append(prefix_components)
def main():
try:
opts, args = my_getopt(sys.argv[1:], "vh?",
["verbose", "dry-run", "naive-mode", "help"])
except:
usage_and_exit("Unable to process arguments/options")
migrator = Migrator()
# Process arguments.
if len(args) >= 1:
migrator.repos_path = svn.core.svn_path_canonicalize(args[0])
if len(args) >= 2:
path_prefixes = args[1:]
else:
# Default to the root of the repository.
path_prefixes = [ "" ]
else:
usage_and_exit("REPOS_PATH argument required")
# Process options.
for opt, value in opts:
if opt == "--help" or opt in ("-h", "-?"):
usage_and_exit()
elif opt == "--verbose" or opt == "-v":
migrator.verbose = True
elif opt == "--dry-run":
migrator.dry_run = True
elif opt == "--naive-mode":
migrator.naive_mode = True
else:
usage_and_exit("Unknown option '%s'" % opt)
migrator.set_path_prefixes(path_prefixes)
migrator.run()
if __name__ == "__main__":
main()
| 38.9 | 79 | 0.661985 |
7946939a56bdbcf8e3f641ebe54d9ddc789634a3 | 1,266 | py | Python | trufflehog/models.py | jleeothon/trufflehog | b71eb157da9f263e242641f5af83ca8788e55798 | [
"MIT"
] | null | null | null | trufflehog/models.py | jleeothon/trufflehog | b71eb157da9f263e242641f5af83ca8788e55798 | [
"MIT"
] | 3 | 2020-02-12T00:17:43.000Z | 2021-06-10T19:56:49.000Z | trufflehog/models.py | jleeothon/trufflehog | b71eb157da9f263e242641f5af83ca8788e55798 | [
"MIT"
] | null | null | null | from django.db import models
import datetime
import functools
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
__all__ = ('DateTraceable', 'Hideable')
class DateTraceable(models.Model):
"""
    An abstract model mixin that lets you trace the date of creation and
updating.
"""
created = models.DateTimeField(
editable=False,
verbose_name=_("date created")
)
updated = models.DateTimeField(
editable=False,
verbose_name=_("date updated")
)
class Meta:
abstract = True
def save(self, *args, **kwargs):
now = timezone.now()
if not self.pk:
self.created = now
self.updated = now
super().save(*args, **kwargs)
class Hideable(models.Model):
"""
    An abstract model mixin that lets you trace the date and time of hiding
(pseudo-deletion).
"""
hidden = models.DateTimeField(
null=True,
editable=False,
verbose_name=_("date hidden"),
db_index=True,
)
class Meta:
abstract = True
@property
def is_hidden(self):
return self.hidden is not None
    def hide(self):
        now = timezone.now()
        self.hidden = now
        self.save()
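    # Illustrative usage of these mixins (sketch only; "Article" is a
    # hypothetical model, not part of this module):
    #
    #     class Article(DateTraceable, Hideable):
    #         title = models.CharField(max_length=100)
    #
    #     article.hide()                               # pseudo-delete
    #     Article.objects.filter(hidden__isnull=True)  # only visible rows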
| 20.419355 | 73 | 0.619273 |
794694a3187a158ea7191373cf03c9b8d878b8d2 | 792 | py | Python | app/core/admin.py | shreyask543/Recipe-api | 34c43db4ee6cdcd90cdcf8e88a536ef66452ddb6 | [
"MIT"
] | null | null | null | app/core/admin.py | shreyask543/Recipe-api | 34c43db4ee6cdcd90cdcf8e88a536ef66452ddb6 | [
"MIT"
] | null | null | null | app/core/admin.py | shreyask543/Recipe-api | 34c43db4ee6cdcd90cdcf8e88a536ef66452ddb6 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseuserAdmin
from django.utils.translation import gettext as _
from core import models
class UserAdmin(BaseuserAdmin):
ordering=['id']
list_display=['email', 'name']
fieldsets=(
(None, {'fields':('email','password')}),
(_('Personal Info'),{'fields':('name',)}),
(
_('Permissions'),
{'fields':('is_active','is_staff','is_superuser')}
),
(_('Important Dates'), {'fields':('last_login',)}),
)
add_fieldsets=(
(None,
{
'classes':('wide',),
'fields':('email','password1','password2')
}
),
)
admin.site.register(models.User, UserAdmin)
# Register your models here.
| 25.548387 | 64 | 0.570707 |
794694c022586ee11a24e91b5f3c5af080fc2340 | 1,459 | py | Python | stt/core/local_mic.py | Haynie-Research-and-Development/jarvis | 062f20303b3e1d46a20a68e8ed8337b3d05f84dd | [
"Apache-2.0"
] | 78 | 2017-08-19T03:46:13.000Z | 2020-02-19T04:29:45.000Z | stt/core/local_mic.py | Haynie-Research-and-Development/jarvis | 062f20303b3e1d46a20a68e8ed8337b3d05f84dd | [
"Apache-2.0"
] | 5 | 2017-08-21T16:33:08.000Z | 2018-06-21T18:37:18.000Z | stt/core/local_mic.py | Haynie-Research-and-Development/jarvis | 062f20303b3e1d46a20a68e8ed8337b3d05f84dd | [
"Apache-2.0"
] | 13 | 2017-08-19T16:46:08.000Z | 2018-11-05T23:11:34.000Z | # -*- coding: utf-8 -*-
#**********************************************************
#* CATEGORY JARVIS HOME AUTOMTION
#* GROUP SPEECH TO TEXT
#* AUTHOR LANCE HAYNIE <[email protected]>
#**********************************************************
#Jarvis Home Automation
#Copyright (C) 2017 Haynie Research & Development
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along
#with this program; if not, write to the Free Software Foundation, Inc.,
#51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
class Mic(object):
prev = None
def __init__(self, *args, **kwargs):
return
def wait_for_keyword(self, keyword="JARVIS"):
return
def active_listen(self, timeout=3):
input = raw_input("YOU: ")
self.prev = input
return [input]
def listen(self):
return self.active_listen(timeout=3)
def say(self, phrase, OPTIONS=None):
print("JARVIS: %s" % phrase)
| 33.930233 | 72 | 0.647019 |
794694c5230efd1b0bcd489a27f0341f8215ab6a | 892 | py | Python | models/model.py | technetbytes/ModelService | 5268d53b4bedb400d8ba4a326297fa7f6b8bc666 | [
"Apache-2.0"
] | null | null | null | models/model.py | technetbytes/ModelService | 5268d53b4bedb400d8ba4a326297fa7f6b8bc666 | [
"Apache-2.0"
] | null | null | null | models/model.py | technetbytes/ModelService | 5268d53b4bedb400d8ba4a326297fa7f6b8bc666 | [
"Apache-2.0"
] | null | null | null | from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, PickleType
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class AiModel(Base):
'''This is AiModel sample Data model class.'''
__tablename__ = "tModels"
__table_args__ = {"schema":"KnowHow.dbo"}
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Text, nullable=False)
modelName = Column(Text, nullable=True)
description = Column(Text, nullable=True)
createdBy = Column(Integer, nullable=True)
createdOn = Column(DateTime, nullable=True)
modifiedBy = Column(Integer, nullable=True)
modifiedOn = Column(DateTime, nullable=True)
isActive = Column(Boolean, nullable=True)
categoryId = Column(Integer, nullable=True)
def __repr__(self):
return '<AiModel model {}>'.format(self.id) | 37.166667 | 90 | 0.705157 |
7946955d4bffa68f8fa64fc98210b680d51091ab | 8,100 | py | Python | examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_derpy.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 9,136 | 2015-01-02T00:41:45.000Z | 2022-03-31T15:30:02.000Z | examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_derpy.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 2,424 | 2015-01-05T08:55:58.000Z | 2022-03-30T19:34:55.000Z | examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_derpy.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 2,921 | 2015-01-02T10:19:30.000Z | 2022-03-31T02:48:42.000Z | """This file implements the functionalities of a minitaur derpy using pybullet.
It is the result of first pass system identification for the derpy robot.
"""
import math
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import numpy as np
from pybullet_envs.minitaur.envs import minitaur
KNEE_CONSTRAINT_POINT_LONG = [0, 0.0055, 0.088]
KNEE_CONSTRAINT_POINT_SHORT = [0, 0.0055, 0.100]
class MinitaurDerpy(minitaur.Minitaur):
"""The minitaur class that simulates a quadruped robot from Ghost Robotics.
"""
def Reset(self, reload_urdf=True, default_motor_angles=None, reset_time=3.0):
"""Reset the minitaur to its initial states.
Args:
reload_urdf: Whether to reload the urdf file. If not, Reset() just place
the minitaur back to its starting position.
default_motor_angles: The default motor angles. If it is None, minitaur
will hold a default pose (motor angle math.pi / 2) for 100 steps. In
torque control mode, the phase of holding the default pose is skipped.
reset_time: The duration (in seconds) to hold the default motor angles. If
reset_time <= 0 or in torque control mode, the phase of holding the
default pose is skipped.
"""
if self._on_rack:
init_position = minitaur.INIT_RACK_POSITION
else:
init_position = minitaur.INIT_POSITION
if reload_urdf:
if self._self_collision_enabled:
self.quadruped = self._pybullet_client.loadURDF(
"%s/quadruped/minitaur_derpy.urdf" % self._urdf_root,
init_position,
useFixedBase=self._on_rack,
flags=(self._pybullet_client.URDF_USE_SELF_COLLISION_EXCLUDE_PARENT))
else:
self.quadruped = self._pybullet_client.loadURDF("%s/quadruped/minitaur_derpy.urdf" %
self._urdf_root,
init_position,
useFixedBase=self._on_rack)
self._BuildJointNameToIdDict()
self._BuildUrdfIds()
if self._remove_default_joint_damping:
self._RemoveDefaultJointDamping()
self._BuildMotorIdList()
self._RecordMassInfoFromURDF()
self._RecordInertiaInfoFromURDF()
self.ResetPose(add_constraint=True)
else:
self._pybullet_client.resetBasePositionAndOrientation(self.quadruped, init_position,
minitaur.INIT_ORIENTATION)
self._pybullet_client.resetBaseVelocity(self.quadruped, [0, 0, 0], [0, 0, 0])
self.ResetPose(add_constraint=False)
self._overheat_counter = np.zeros(self.num_motors)
self._motor_enabled_list = [True] * self.num_motors
self._step_counter = 0
# Perform reset motion within reset_duration if in position control mode.
# Nothing is performed if in torque control mode for now.
# TODO(jietan): Add reset motion when the torque control is fully supported.
self._observation_history.clear()
if not self._torque_control_enabled and reset_time > 0.0:
self.ReceiveObservation()
for _ in range(100):
self.ApplyAction([math.pi / 2] * self.num_motors)
self._pybullet_client.stepSimulation()
self.ReceiveObservation()
if default_motor_angles is not None:
num_steps_to_reset = int(reset_time / self.time_step)
for _ in range(num_steps_to_reset):
self.ApplyAction(default_motor_angles)
self._pybullet_client.stepSimulation()
self.ReceiveObservation()
self.ReceiveObservation()
def _ResetPoseForLeg(self, leg_id, add_constraint):
"""Reset the initial pose for the leg.
Args:
leg_id: It should be 0, 1, 2, or 3, which represents the leg at
front_left, back_left, front_right and back_right.
add_constraint: Whether to add a constraint at the joints of two feet.
"""
knee_friction_force = 0
half_pi = math.pi / 2.0
knee_angle = -2.1834
leg_position = minitaur.LEG_POSITION[leg_id]
self._pybullet_client.resetJointState(self.quadruped,
self._joint_name_to_id["motor_" + leg_position +
"L_joint"],
self._motor_direction[2 * leg_id] * half_pi,
targetVelocity=0)
self._pybullet_client.resetJointState(self.quadruped,
self._joint_name_to_id["knee_" + leg_position +
"L_joint"],
self._motor_direction[2 * leg_id] * knee_angle,
targetVelocity=0)
self._pybullet_client.resetJointState(self.quadruped,
self._joint_name_to_id["motor_" + leg_position +
"R_joint"],
self._motor_direction[2 * leg_id + 1] * half_pi,
targetVelocity=0)
self._pybullet_client.resetJointState(self.quadruped,
self._joint_name_to_id["knee_" + leg_position +
"R_joint"],
self._motor_direction[2 * leg_id + 1] * knee_angle,
targetVelocity=0)
if add_constraint:
if leg_id < 2:
self._pybullet_client.createConstraint(
self.quadruped, self._joint_name_to_id["knee_" + leg_position + "R_joint"],
self.quadruped, self._joint_name_to_id["knee_" + leg_position + "L_joint"],
self._pybullet_client.JOINT_POINT2POINT, [0, 0, 0], KNEE_CONSTRAINT_POINT_SHORT,
KNEE_CONSTRAINT_POINT_LONG)
else:
self._pybullet_client.createConstraint(
self.quadruped, self._joint_name_to_id["knee_" + leg_position + "R_joint"],
self.quadruped, self._joint_name_to_id["knee_" + leg_position + "L_joint"],
self._pybullet_client.JOINT_POINT2POINT, [0, 0, 0], KNEE_CONSTRAINT_POINT_LONG,
KNEE_CONSTRAINT_POINT_SHORT)
if self._accurate_motor_model_enabled or self._pd_control_enabled:
# Disable the default motor in pybullet.
self._pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=(self._joint_name_to_id["motor_" + leg_position + "L_joint"]),
controlMode=self._pybullet_client.VELOCITY_CONTROL,
targetVelocity=0,
force=knee_friction_force)
self._pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=(self._joint_name_to_id["motor_" + leg_position + "R_joint"]),
controlMode=self._pybullet_client.VELOCITY_CONTROL,
targetVelocity=0,
force=knee_friction_force)
else:
self._SetDesiredMotorAngleByName("motor_" + leg_position + "L_joint",
self._motor_direction[2 * leg_id] * half_pi)
self._SetDesiredMotorAngleByName("motor_" + leg_position + "R_joint",
self._motor_direction[2 * leg_id + 1] * half_pi)
self._pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=(self._joint_name_to_id["knee_" + leg_position + "L_joint"]),
controlMode=self._pybullet_client.VELOCITY_CONTROL,
targetVelocity=0,
force=knee_friction_force)
self._pybullet_client.setJointMotorControl2(
bodyIndex=self.quadruped,
jointIndex=(self._joint_name_to_id["knee_" + leg_position + "R_joint"]),
controlMode=self._pybullet_client.VELOCITY_CONTROL,
targetVelocity=0,
force=knee_friction_force)
| 47.368421 | 93 | 0.623086 |
794695b5c4d63abb3ce976e7fe32fe489183d196 | 2,079 | py | Python | airflow/providers/amazon/aws/sensors/sagemaker_tuning.py | daemon-demon/airflow | 6f96e81f0123b30750fb68ec496246023bf63f35 | [
"Apache-2.0"
] | 2 | 2021-07-30T17:25:56.000Z | 2021-08-03T13:51:09.000Z | airflow/providers/amazon/aws/sensors/sagemaker_tuning.py | daemon-demon/airflow | 6f96e81f0123b30750fb68ec496246023bf63f35 | [
"Apache-2.0"
] | 20 | 2021-01-23T12:33:08.000Z | 2021-12-07T22:30:37.000Z | airflow/providers/amazon/aws/sensors/sagemaker_tuning.py | daemon-demon/airflow | 6f96e81f0123b30750fb68ec496246023bf63f35 | [
"Apache-2.0"
] | 1 | 2020-09-10T09:51:46.000Z | 2020-09-10T09:51:46.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.providers.amazon.aws.hooks.sagemaker import SageMakerHook
from airflow.providers.amazon.aws.sensors.sagemaker_base import SageMakerBaseSensor
from airflow.utils.decorators import apply_defaults
class SageMakerTuningSensor(SageMakerBaseSensor):
"""
    Asks for the state of the tuning job until it reaches a terminal state.
    The sensor will error if the job errors, throwing an AirflowException
containing the failure reason.
:param job_name: job_name of the tuning instance to check the state of
:type job_name: str
"""
template_fields = ['job_name']
template_ext = ()
@apply_defaults
def __init__(self, *, job_name, **kwargs):
super().__init__(**kwargs)
self.job_name = job_name
def non_terminal_states(self):
return SageMakerHook.non_terminal_states
def failed_states(self):
return SageMakerHook.failed_states
def get_sagemaker_response(self):
self.log.info('Poking Sagemaker Tuning Job %s', self.job_name)
return self.get_hook().describe_tuning_job(self.job_name)
def get_failed_reason_from_response(self, response):
return response['FailureReason']
def state_from_response(self, response):
return response['HyperParameterTuningJobStatus']
| 36.473684 | 83 | 0.749399 |
7946960dbd2f5d9667143105ebb524c987f9f246 | 969 | gyp | Python | cloud_print/gcp20/prototype/gcp20_device.gyp | hujiajie/pa-chromium | 1816ff80336a6efd1616f9e936880af460b1e105 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2020-05-03T06:33:56.000Z | 2021-11-14T18:39:42.000Z | cloud_print/gcp20/prototype/gcp20_device.gyp | hujiajie/pa-chromium | 1816ff80336a6efd1616f9e936880af460b1e105 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | cloud_print/gcp20/prototype/gcp20_device.gyp | hujiajie/pa-chromium | 1816ff80336a6efd1616f9e936880af460b1e105 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'target_defaults': {
'variables': {
'chromium_code': 1,
'enable_wexit_time_destructors': 1,
},
'include_dirs': [
'<(DEPTH)',
# To allow including "version.h"
'<(SHARED_INTERMEDIATE_DIR)',
],
},
'targets': [
{
'target_name': 'gcp20_device',
'type': 'executable',
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/net/net.gyp:net',
],
'sources': [
'dns_sd_server.cc',
'gcp20_device.cc',
],
'msvs_settings': {
'VCLinkerTool': {
'SubSystem': '1', # Set /SUBSYSTEM:CONSOLE
'AdditionalDependencies': [
'secur32.lib',
'httpapi.lib',
'Ws2_32.lib',
],
},
},
},
],
}
| 23.071429 | 72 | 0.506708 |
794697fd8043f084052f9059e46bd0864f7c68e9 | 2,212 | py | Python | v2PyTorch/evaluate.py | gzerveas/TransformChrome | ab1046009aff2ec863aa65223dcfcd750d41ab86 | [
"MIT"
] | 1 | 2020-07-14T18:19:18.000Z | 2020-07-14T18:19:18.000Z | v2PyTorch/evaluate.py | gzerveas/TransformChrome | ab1046009aff2ec863aa65223dcfcd750d41ab86 | [
"MIT"
] | null | null | null | v2PyTorch/evaluate.py | gzerveas/TransformChrome | ab1046009aff2ec863aa65223dcfcd750d41ab86 | [
"MIT"
] | null | null | null | import numpy
import torch
import math  # needed for math.isnan in compute_aupr
import scipy
import scipy.sparse as sp
import logging
from six.moves import xrange
from collections import OrderedDict
import sys
import pdb
from sklearn import metrics
import torch.nn.functional as F
from torch.autograd import Variable
from pdb import set_trace as stop
def save_to_csv(epoch, metrics, outfile=None):
    line = ""
    if outfile:
        with open(outfile, 'a') as f:
            for item in metrics:
                line += str(item)
                line += ','
            f.write(epoch + ',' + line + '\n')
            print(epoch + ',' + line + '\n')
    return
def compute_aupr(all_targets,all_predictions):
aupr_array = []
for i in range(all_targets.shape[1]):
try:
precision, recall, thresholds = metrics.precision_recall_curve(all_targets[:,i], all_predictions[:,i], pos_label=1)
auPR = metrics.auc(recall,precision)#,reorder=True)
if not math.isnan(auPR):
aupr_array.append(numpy.nan_to_num(auPR))
except:
pass
aupr_array = numpy.array(aupr_array)
mean_aupr = numpy.mean(aupr_array)
median_aupr = numpy.median(aupr_array)
var_aupr = numpy.var(aupr_array)
return mean_aupr,median_aupr,var_aupr,aupr_array
def compute_auc(all_targets,all_predictions):
auc_array = []
for i in range(all_targets.shape[1]):
try:
auROC = metrics.roc_auc_score(all_targets[:,i], all_predictions[:,i])
auc_array.append(auROC)
except ValueError:
pass
auc_array = numpy.array(auc_array)
mean_auc = numpy.mean(auc_array)
median_auc = numpy.median(auc_array)
var_auc = numpy.var(auc_array)
return mean_auc,median_auc,var_auc,auc_array
def compute_metrics(predictions, targets):
pred=predictions.numpy()
targets=targets.numpy()
mean_auc,median_auc,var_auc,auc_array = compute_auc(targets,pred)
mean_aupr,median_aupr,var_aupr,aupr_array = compute_aupr(targets,pred)
return mean_aupr,mean_auc
| 25.72093 | 127 | 0.661392 |
7946980c82684da7c6dc80538f79d4be317a96b6 | 3,135 | py | Python | superset/explore/form_data/commands/create.py | nieaijun99/superset | 86368dd406b9e828f31186a4b6179d24758a7d87 | [
"Apache-2.0"
] | 2 | 2021-12-21T15:57:16.000Z | 2022-01-31T02:22:02.000Z | superset/explore/form_data/commands/create.py | nieaijun99/superset | 86368dd406b9e828f31186a4b6179d24758a7d87 | [
"Apache-2.0"
] | null | null | null | superset/explore/form_data/commands/create.py | nieaijun99/superset | 86368dd406b9e828f31186a4b6179d24758a7d87 | [
"Apache-2.0"
] | 2 | 2021-12-21T13:41:18.000Z | 2021-12-26T22:16:43.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from flask import session
from sqlalchemy.exc import SQLAlchemyError
from superset.commands.base import BaseCommand
from superset.explore.form_data.commands.parameters import CommandParameters
from superset.explore.form_data.commands.state import TemporaryExploreState
from superset.explore.form_data.commands.utils import check_access
from superset.extensions import cache_manager
from superset.key_value.utils import get_owner, random_key
from superset.temporary_cache.commands.exceptions import TemporaryCacheCreateFailedError
from superset.temporary_cache.utils import cache_key
from superset.utils.schema import validate_json
logger = logging.getLogger(__name__)
class CreateFormDataCommand(BaseCommand):
def __init__(self, cmd_params: CommandParameters):
self._cmd_params = cmd_params
def run(self) -> str:
self.validate()
try:
datasource_id = self._cmd_params.datasource_id
datasource_type = self._cmd_params.datasource_type
chart_id = self._cmd_params.chart_id
tab_id = self._cmd_params.tab_id
actor = self._cmd_params.actor
form_data = self._cmd_params.form_data
check_access(datasource_id, chart_id, actor, datasource_type)
contextual_key = cache_key(
session.get("_id"), tab_id, datasource_id, chart_id, datasource_type
)
key = cache_manager.explore_form_data_cache.get(contextual_key)
if not key or not tab_id:
key = random_key()
if form_data:
state: TemporaryExploreState = {
"owner": get_owner(actor),
"datasource_id": datasource_id,
"datasource_type": datasource_type,
"chart_id": chart_id,
"form_data": form_data,
}
cache_manager.explore_form_data_cache.set(key, state)
cache_manager.explore_form_data_cache.set(contextual_key, key)
return key
except SQLAlchemyError as ex:
logger.exception("Error running create command")
raise TemporaryCacheCreateFailedError() from ex
def validate(self) -> None:
if self._cmd_params.form_data:
validate_json(self._cmd_params.form_data)
| 42.945205 | 88 | 0.704944 |
79469aa6f38865ce33bd754061ea9ab7f7547358 | 3,447 | py | Python | tests/test_permissions.py | jonathan-s/djangocms-alias | 9a9f3020aa0a01339164094c2bc1caf190d66428 | [
"BSD-3-Clause"
] | 1 | 2020-05-12T02:29:16.000Z | 2020-05-12T02:29:16.000Z | tests/test_permissions.py | jonathan-s/djangocms-alias | 9a9f3020aa0a01339164094c2bc1caf190d66428 | [
"BSD-3-Clause"
] | 9 | 2021-11-30T16:12:46.000Z | 2022-03-31T16:55:36.000Z | tests/test_permissions.py | jonathan-s/djangocms-alias | 9a9f3020aa0a01339164094c2bc1caf190d66428 | [
"BSD-3-Clause"
] | 7 | 2020-01-13T09:10:40.000Z | 2021-07-21T12:49:07.000Z | from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from cms.api import add_plugin
from djangocms_alias.cms_plugins import Alias
from djangocms_alias.models import Alias as AliasModel
from djangocms_alias.utils import is_versioning_enabled
from .base import BaseAliasPluginTestCase
class AliasPermissionsTestCase(BaseAliasPluginTestCase):
def test_can_create_alias_superuser(self):
self.assertTrue(
Alias.can_create_alias(
self.get_superuser(),
[self.plugin],
),
)
def test_can_create_alias_standard_user(self):
self.assertFalse(
Alias.can_create_alias(
self.get_standard_user(),
[self.plugin],
),
)
def test_can_create_alias_staff_no_permissions(self):
self.assertFalse(
Alias.can_create_alias(
self.get_staff_user_with_no_permissions(),
[self.plugin],
),
)
def test_can_create_alias_staff_partial_permissions(self):
user = self.get_staff_user_with_no_permissions()
user.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(
AliasModel,
),
codename='add_alias',
)
)
user.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(
Alias.model,
),
codename='add_aliasplugin',
)
)
alias = self._create_alias(self.placeholder.get_plugins())
add_plugin(
self.placeholder,
Alias,
language=self.language,
alias=alias,
)
self.assertFalse(
Alias.can_create_alias(
user,
self.placeholder.get_plugins(),
),
)
def test_can_create_alias_staff_enough_permissions(self):
user = self.get_staff_user_with_std_permissions()
user.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(
AliasModel,
),
codename='add_alias',
)
)
self.assertTrue(
Alias.can_create_alias(
user,
self.placeholder.get_plugins(),
),
)
def test_can_detach_no_permission(self):
user = self.get_staff_user_with_no_permissions()
alias = self._create_alias(self.placeholder.get_plugins())
self.assertFalse(
Alias.can_detach(
user,
self.placeholder,
alias.get_placeholder(self.language).get_plugins(),
),
)
def test_can_detach_has_permission(self):
user = self.get_staff_user_with_std_permissions()
alias = self._create_alias(self.placeholder.get_plugins())
placeholder = self.placeholder
if is_versioning_enabled():
placeholder = self._get_draft_page_placeholder()
self.assertTrue(
Alias.can_detach(
user,
placeholder,
alias.get_placeholder(self.language).get_plugins(),
),
)
| 30.776786 | 67 | 0.577894 |
79469aacd2457be98e2ed7887cc05db3dca2cf18 | 787 | py | Python | tests/conftest.py | caphrim007/hashin | a9e2239bf46fcba12be9ecac272a806fc48bfe1e | [
"MIT"
] | 92 | 2016-01-27T10:41:48.000Z | 2022-03-28T19:54:46.000Z | tests/conftest.py | caphrim007/hashin | a9e2239bf46fcba12be9ecac272a806fc48bfe1e | [
"MIT"
] | 91 | 2016-01-26T22:11:22.000Z | 2022-03-19T12:59:29.000Z | tests/conftest.py | caphrim007/hashin | a9e2239bf46fcba12be9ecac272a806fc48bfe1e | [
"MIT"
] | 28 | 2016-02-01T22:05:27.000Z | 2022-02-24T06:28:09.000Z | import os
from tempfile import mkdtemp
from contextlib import contextmanager
from shutil import rmtree
import mock
import pytest
@pytest.fixture
def murlopen():
with mock.patch("hashin.urlopen") as patch:
yield patch
@pytest.fixture
def mock_get_parser():
with mock.patch("hashin.get_parser") as patch:
yield patch
@pytest.fixture
def mock_sys():
with mock.patch("hashin.sys") as patch:
yield patch
@pytest.fixture
def mock_run():
with mock.patch("hashin.run") as patch:
yield patch
@pytest.fixture
def tmpfile():
@contextmanager
def inner(name="requirements.txt"):
dir_ = mkdtemp("hashintest")
try:
yield os.path.join(dir_, name)
finally:
rmtree(dir_)
return inner
| 17.108696 | 50 | 0.660737 |
79469ba89529a2626dacbbfb24ed0dab6ffe4d8d | 901 | py | Python | pdfcreator/urls.py | fahimfarhan/cancer-web-app | 6c5d8c5c90b0909cbd161d2ae87b4f12549bdfef | [
"MIT"
] | null | null | null | pdfcreator/urls.py | fahimfarhan/cancer-web-app | 6c5d8c5c90b0909cbd161d2ae87b4f12549bdfef | [
"MIT"
] | 5 | 2021-03-18T20:13:38.000Z | 2022-01-13T00:35:37.000Z | pdfcreator/urls.py | fahimfarhan/cancer-web-app | 6c5d8c5c90b0909cbd161d2ae87b4f12549bdfef | [
"MIT"
] | null | null | null | from django.conf.urls import url
from pdfcreator import views, summary
app_name = 'pdfcreator'
urlpatterns = [
url(r'^view/prescription/(?P<p_id>[0-9]+)/(?P<number>[0-9]+)/(?P<user_pk>[0-9]+)/$', views.view_prescription, name='view_prescription'),
url(r'^pdf/prescription/(?P<p_id>[0-9]+)/(?P<number>[0-9]+)/$', views.print_prescription, name='print_prescription'),
#
url(r'^view/referralnote/(?P<p_id>[0-9]+)/(?P<number>[0-9]+)/(?P<user_pk>[0-9]+)/$',
views.view_referralnote, name='view_referralnote'),
url(r'^pdf/referralnote/(?P<p_id>[0-9]+)/(?P<number>[0-9]+)/$', views.print_referralnote,
name='print_referralnote'),
#
url(r'^view/report/(?P<user_pk>[0-9]+)/(?P<p_id>[0-9]+)/(?P<tp_num>[0-9]+)/$', summary.view_report, name='view_report'),
url(r'^pdf/report/(?P<p_id>[0-9]+)/(?P<tp_num>[0-9]+)/$', summary.print_report, name='print_report'),
] | 47.421053 | 140 | 0.624861 |
79469bb18a2e15a0d92c4d47f85ab7c15df9b034 | 1,519 | py | Python | task1_watchers.py | MoisesFreitas1/DeeperSystems_Task1 | e1ea248c76db72297ad7ff40da645d41d88c031a | [
"MIT"
] | null | null | null | task1_watchers.py | MoisesFreitas1/DeeperSystems_Task1 | e1ea248c76db72297ad7ff40da645d41d88c031a | [
"MIT"
] | null | null | null | task1_watchers.py | MoisesFreitas1/DeeperSystems_Task1 | e1ea248c76db72297ad7ff40da645d41d88c031a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 14:36:24 2020
@author: Moises
"""
import pandas as pd
import json
df_json = pd.read_json('source_file_2.json')
df_json = df_json.sort_values(by ='priority')
df_json = pd.concat([df_json], ignore_index=True)
watchers = df_json['watchers'].to_frame()
watchers = pd.concat([watchers], ignore_index=True)
watchersSTR = []
for row in range(len(watchers)):
sets = watchers.iloc[row].to_string().replace('watchers ', '')
sets = sets.replace('[', '')
sets = sets.replace(']', '')
sets = sets.replace(' ', '')
if("," in sets):
manSet = sets.split(',')
for i in range(0, len(manSet)):
watchersSTR.append(manSet[i])
else:
watchersSTR.append(sets)
watchersSets = pd.DataFrame(watchersSTR).drop_duplicates()
watchers_List_Col = watchersSets.iloc[:,0].to_list()
listProjects = [[]]
for i in range(1,len(watchers_List_Col)):
listProjects.append([])
for i in range(len(df_json)):
for j in range(len(watchersSets)):
if(watchersSets.iloc[j].to_string().replace(str(0) + " ", "") in df_json.loc[i,'watchers']):
col = watchersSets.iloc[j].to_string().replace(str(0) + " ", "")
listProjects[j].append(df_json.loc[i,'name'])
watchersDF = pd.DataFrame([listProjects], columns = watchers_List_Col)
watchersDict = watchersDF.to_dict(orient='list')
with open('watchers.json', 'w') as fp:
json.dump(watchersDict, fp) | 29.211538 | 103 | 0.641211 |
79469bb5d514c3c8dbefdef7f387feb8ea3c7246 | 22,090 | py | Python | cozy/evaluation.py | MostAwesomeDude/cozy | e7b0ace2915c54b1176fc4d3eed289ede109a058 | [
"Apache-2.0"
] | null | null | null | cozy/evaluation.py | MostAwesomeDude/cozy | e7b0ace2915c54b1176fc4d3eed289ede109a058 | [
"Apache-2.0"
] | null | null | null | cozy/evaluation.py | MostAwesomeDude/cozy | e7b0ace2915c54b1176fc4d3eed289ede109a058 | [
"Apache-2.0"
] | null | null | null | """Interpreter for Cozy expressions.
Important functions:
- eval: execute an expression in an environment
- eval_bulk: execute the same expression in many different environments
"""
from functools import cmp_to_key, lru_cache
import itertools
from fractions import Fraction
from cozy.target_syntax import *
from cozy.syntax_tools import pprint, free_vars, free_vars_and_funcs, purify
from cozy.common import FrozenDict, OrderedSet, extend, unique
from cozy.typecheck import is_numeric, is_collection
from cozy.structures import extension_handler
from cozy.value_types import Map, Bag, Handle, compare_values, values_equal, LT, EQ, GT
def eval(e : Exp, env : {str:object}, *args, **kwargs):
"""Evaluate an expression in an environment.
Parameters:
e - a Cozy expression
env - an environment mapping variable names to values
use_default_values_for_undefined_vars - boolean indicating whether to
use a default value for any variable missing in the environment.
If false, then an error is thrown when a variable has no associated
value. Defaults to False.
"""
return eval_bulk(e, (env,), *args, **kwargs)[0]
def eval_bulk(
e : Exp,
envs : [{str:object}],
use_default_values_for_undefined_vars : bool = False):
"""Evaluate an expression in many different environments.
This function accepts the same arguments as `eval`, but takes a list of
environments instead of just one.
The call
eval_bulk(e, envs)
is equivalent to
[eval(e, env) for env in envs].
However, using `eval_bulk` is much faster than repeatedly calling `eval` on
the same expression.
"""
e = purify(e)
if not envs:
return []
ops = []
vars = OrderedSet(free_vars_and_funcs(e))
types = { v.id : v.type for v in free_vars(e) }
vmap = { v : i for (i, v) in enumerate(vars) }
try:
envs = [ [(env.get(v, mkval(types[v])) if (use_default_values_for_undefined_vars and v in types) else env[v]) for v in vars] for env in envs ]
except KeyError:
import sys
print("OH NO", file=sys.stderr)
print("e = {}".format(pprint(e)), file=sys.stderr)
print("eval_bulk({!r}, {!r}, use_default_values_for_undefined_vars={!r})".format(e, envs, use_default_values_for_undefined_vars), file=sys.stderr)
raise
_compile(e, vmap, ops)
return [_eval_compiled(ops, env) for env in envs]
@lru_cache(maxsize=None)
def mkval(type : Type):
"""
Produce an arbitrary value of the given type.
eval(construct_value(t), {}) == mkval(t)
"""
return eval(construct_value(type), {})
@typechecked
def construct_value(t : Type) -> Exp:
"""
Construct an arbitrary expression e of the given type.
eval(construct_value(t), {}) == mkval(t)
"""
if is_numeric(t):
e = ENum(0)
elif t == BOOL:
e = F
elif t == STRING:
e = EStr("")
elif is_collection(t):
e = EEmptyList()
elif isinstance(t, TTuple):
e = ETuple(tuple(construct_value(tt) for tt in t.ts))
elif isinstance(t, TRecord):
e = EMakeRecord(tuple((f, construct_value(tt)) for (f, tt) in t.fields))
elif isinstance(t, TEnum):
e = EEnumEntry(t.cases[0])
elif isinstance(t, THandle):
e = EHandle(construct_value(INT), construct_value(t.value_type))
elif isinstance(t, TNative):
e = ENative(construct_value(INT))
elif isinstance(t, TMap):
e = EMakeMap2(
EEmptyList().with_type(TBag(t.k)),
ELambda(EVar("x").with_type(t.k), construct_value(t.v)))
else:
h = extension_handler(type(t))
if h is not None:
return h.default_value(t, construct_value)
raise NotImplementedError(pprint(t))
return e.with_type(t)
def _uneval(t, value):
if is_numeric(t):
return ENum(value).with_type(t)
elif t == BOOL:
return EBool(value).with_type(t)
elif is_collection(t):
e = EEmptyList().with_type(t)
for x in value:
e = EBinOp(e, "+", ESingleton(uneval(t.t, x)).with_type(t)).with_type(t)
return e
elif isinstance(t, TString):
return EStr(value).with_type(t)
elif isinstance(t, TTuple):
return ETuple(tuple(uneval(tt, x) for (tt, x) in zip(t.ts, value))).with_type(t)
elif isinstance(t, TRecord):
return EMakeRecord(tuple((f, uneval(tt, value[f])) for (f, tt) in t.fields)).with_type(t)
elif isinstance(t, TEnum):
return EEnumEntry(value).with_type(t)
elif isinstance(t, THandle):
return EHandle(ENum(value.address).with_type(INT), uneval(t.value_type, value.value)).with_type(t)
elif isinstance(t, TNative):
return ENative(ENum(value[1]).with_type(INT)).with_type(t)
else:
raise NotImplementedError(pprint(t))
@typechecked
def uneval(t : Type, value) -> Exp:
"""
Produce an expression `e` of type `t` such that `eval(e, {}) == value`.
"""
res = _uneval(t, value)
assert eval(res, {}) == value
return res
def _eval_compiled(ops, init_stk=()):
ops = list(reversed(ops))
stk = list(init_stk)
while ops:
op = ops.pop()
new_ops = op(stk)
if new_ops:
ops.extend(reversed(new_ops))
return stk[-1]
def push(val):
def _push(stk):
stk.append(val)
return _push
def push_true(stk):
stk.append(True)
def push_false(stk):
stk.append(False)
def make_handle(stk):
value = stk.pop()
addr = stk.pop()
stk.append(Handle(addr, value))
def make_singleton_bag(stk):
stk.append(Bag((stk.pop(),)))
def make_singleton_list(stk):
stk.append((stk.pop(),))
def withalteredvalue(stk):
nv = stk.pop()
h = stk.pop()
stk.append(Handle(h.address, nv))
def push_null(stk):
stk.append(None)
def get_handle_value(stk):
stk.append(stk.pop().value)
def iterable_to_bag(stk):
stk.append(Bag(stk.pop()))
def iterable_to_list(stk):
stk.append(tuple(stk.pop()))
def read_map(stk):
k = stk.pop()
m = stk.pop()
stk.append(m[k])
def has_key(key_type):
def _has_key(stk):
k = stk.pop()
m = stk.pop()
stk.append(any(values_equal(key_type, k, kk) for kk in m.keys()))
return _has_key
def read_map_keys(stk):
stk.append(Bag(stk.pop().keys()))
def binaryop_add_numbers(stk):
v2 = stk.pop()
v1 = stk.pop()
stk.append(v1 + v2)
def binaryop_add_collections(stk):
v2 = stk.pop()
v1 = stk.pop()
stk.append(Bag(itertools.chain(v1, v2)))
def binaryop_add_sets(stk):
v2 = stk.pop()
v1 = stk.pop()
stk.append(Bag(unique(itertools.chain(v1, v2))))
def binaryop_mul(stk):
v2 = stk.pop()
v1 = stk.pop()
stk.append(v1 * v2)
def binaryop_sub(stk):
v2 = stk.pop()
v1 = stk.pop()
stk.append(v1 - v2)
def binaryop_sub_bags(elem_type):
def binaryop_sub_bags(stk):
v2 = stk.pop()
v1 = stk.pop()
elems = list(v1)
for x in v2:
for i in range(len(elems)):
if values_equal(elem_type, x, elems[i]):
del elems[i]
break
stk.append(Bag(elems))
return binaryop_sub_bags
def binaryop_sub_lists(elem_type):
def binaryop_sub_lists(stk):
v2 = stk.pop()
v1 = stk.pop()
elems = list(v1)
for x in v2:
for i in range(len(elems)):
if values_equal(elem_type, x, elems[i]):
del elems[i]
break
stk.append(tuple(elems))
return binaryop_sub_lists
def binaryop_eq(t, deep=False):
def binaryop_eq(stk):
v2 = stk.pop()
v1 = stk.pop()
stk.append(compare_values(t, v1, v2, deep=deep) == EQ)
return binaryop_eq
def binaryop_ne(t):
def binaryop_ne(stk):
v2 = stk.pop()
v1 = stk.pop()
stk.append(compare_values(t, v1, v2) != EQ)
return binaryop_ne
def binaryop_lt(t):
def binaryop_lt(stk):
v2 = stk.pop()
v1 = stk.pop()
stk.append(compare_values(t, v1, v2) == LT)
return binaryop_lt
def binaryop_le(t):
def binaryop_le(stk):
v2 = stk.pop()
v1 = stk.pop()
stk.append(compare_values(t, v1, v2) != GT)
return binaryop_le
def binaryop_gt(t):
def binaryop_gt(stk):
v2 = stk.pop()
v1 = stk.pop()
stk.append(compare_values(t, v1, v2) == GT)
return binaryop_gt
def binaryop_ge(t):
def binaryop_ge(stk):
v2 = stk.pop()
v1 = stk.pop()
stk.append(compare_values(t, v1, v2) != LT)
return binaryop_ge
def binaryop_in(elem_type):
def binaryop_in(stk):
v2 = stk.pop()
v1 = stk.pop()
stk.append(any(values_equal(elem_type, v1, v2elem) for v2elem in v2))
return binaryop_in
def unaryop_not(stk):
stk.append(not stk.pop())
def unaryop_sum(stk):
stk.append(sum(stk.pop()))
def unaryop_all(stk):
stk.append(all(stk.pop()))
def unaryop_any(stk):
stk.append(any(stk.pop()))
def unaryop_exists(stk):
stk.append(bool(stk.pop()))
def unaryop_empty(stk):
stk.append(not stk.pop())
def unaryop_neg(stk):
stk.append(-stk.pop())
def unaryop_areunique(elem_type):
keyfunc = cmp_to_key(lambda v1, v2: compare_values(elem_type, v1, v2))
def unaryop_areunique(stk):
v = stk.pop()
l = sorted(v, key=keyfunc)
res = True
for i in range(len(l) - 1):
if values_equal(elem_type, l[i], l[i+1]):
res = False
break
stk.append(res)
return unaryop_areunique
def unaryop_distinct(elem_type):
def unaryop_distinct(stk):
v = stk.pop()
res = []
for x in v:
if not any(values_equal(elem_type, x, y) for y in res):
res.append(x)
stk.append(Bag(res))
return unaryop_distinct
def unaryop_the(default):
def _unaryop_the(stk):
v = stk.pop()
stk.append(v[0] if v else default)
return _unaryop_the
def unaryop_reversed(stk):
stk.append(tuple(reversed(stk.pop())))
def unaryop_len(stk):
stk.append(len(stk.pop()))
def do_concat(stk):
v = stk.pop()
stk.append(Bag(elem for bag in v for elem in bag))
def if_then_else(then_code, else_code):
def ite(stk):
return then_code if stk.pop() else else_code
return ite
def dup(stk):
stk.append(stk[-1])
def swp(stk):
x = stk.pop()
y = stk.pop()
stk.append(x)
stk.append(y)
def drop(stk):
stk.pop()
def drop_front(stk):
l = stk.pop()
stk.append(l[1:])
def drop_back(stk):
l = stk.pop()
stk.append(l[:-1])
def list_index(default):
def _list_index(stk):
i = stk.pop()
l = stk.pop()
stk.append(
l[i] if i >= 0 and i < len(l) else
default)
return _list_index
def list_slice(stk):
end = max(stk.pop(), 0)
start = max(stk.pop(), 0)
l = stk.pop()
stk.append(l[start:end])
_EMPTY_BAG = Bag()
def _compile(e, env : {str:int}, out):
if isinstance(e, EVar):
i = env[e.id]
if isinstance(i, int):
def load_var(stk):
stk.append(stk[i])
out.append(load_var)
else:
def load_bound(stk):
stk.append(i())
out.append(load_bound)
elif isinstance(e, EBool):
out.append(push_true if e.val else push_false)
elif isinstance(e, ENum):
s = e.val
if e.type == FLOAT:
s = Fraction(s)
def push_num(stk):
stk.append(s)
out.append(push_num)
elif isinstance(e, EStr):
s = e.val
def push_str(stk):
stk.append(s)
out.append(push_str)
elif isinstance(e, EEnumEntry):
s = e.name
def push_enum(stk):
stk.append(s)
out.append(push_enum)
elif isinstance(e, EEmptyList):
def push_empty_list(stk):
stk.append(_EMPTY_BAG)
out.append(push_empty_list)
elif isinstance(e, ESingleton):
_compile(e.e, env, out)
if isinstance(e.type, TList):
out.append(make_singleton_list)
else:
out.append(make_singleton_bag)
elif isinstance(e, EHandle):
_compile(e.addr, env, out)
_compile(e.value, env, out)
out.append(make_handle)
elif isinstance(e, ENull):
out.append(push_null)
elif isinstance(e, ECond):
_compile(e.cond, env, out)
then_code = []; _compile(e.then_branch, env, then_code)
else_code = []; _compile(e.else_branch, env, else_code)
def ite(stk):
return then_code if stk.pop() else else_code
out.append(ite)
elif isinstance(e, EMakeRecord):
for (f, ee) in e.fields:
_compile(ee, env, out)
def make_record(stk):
stk.append(FrozenDict((f, stk.pop()) for (f, _) in reversed(e.fields)))
out.append(make_record)
elif isinstance(e, EGetField):
_compile(e.e, env, out)
if isinstance(e.e.type, THandle):
assert e.f == "val"
out.append(get_handle_value)
else:
assert isinstance(e.e.type, TRecord)
f = e.f
def get_field(stk):
stk.append(stk.pop()[f])
out.append(get_field)
elif isinstance(e, ETuple):
n = len(e.es)
for ee in e.es:
_compile(ee, env, out)
def make_tuple(stk):
entries = reversed([stk.pop() for i in range(n)])
stk.append(tuple(entries))
out.append(make_tuple)
elif isinstance(e, ETupleGet):
_compile(e.e, env, out)
def tuple_get(stk):
stk.append(stk.pop()[e.n])
out.append(tuple_get)
elif isinstance(e, EStateVar):
_compile(e.e, env, out)
elif isinstance(e, ENative):
_compile(e.e, env, out)
def make_native(stk):
stk.append((e.type.name, stk.pop()))
out.append(make_native)
elif isinstance(e, EUnaryOp):
_compile(e.e, env, out)
if e.op == UOp.Not:
out.append(unaryop_not)
elif e.op == UOp.Sum:
out.append(unaryop_sum)
elif e.op == UOp.Exists:
out.append(unaryop_exists)
elif e.op == UOp.Empty:
out.append(unaryop_empty)
elif e.op == UOp.All:
out.append(unaryop_all)
elif e.op == UOp.Any:
out.append(unaryop_any)
elif e.op == UOp.Length:
out.append(unaryop_len)
elif e.op == UOp.AreUnique:
out.append(unaryop_areunique(e.e.type.t))
elif e.op == UOp.Distinct:
out.append(unaryop_distinct(e.e.type.t))
elif e.op == UOp.The:
out.append(unaryop_the(default=mkval(e.type)))
elif e.op == UOp.Reversed:
out.append(unaryop_reversed)
elif e.op == "-":
out.append(unaryop_neg)
else:
raise NotImplementedError(e.op)
elif isinstance(e, EBinOp):
if e.op == BOp.And:
return _compile(ECond(e.e1, e.e2, F).with_type(BOOL), env, out)
elif e.op == BOp.Or:
return _compile(ECond(e.e1, T, e.e2).with_type(BOOL), env, out)
elif e.op == "=>":
return _compile(ECond(e.e1, e.e2, T).with_type(BOOL), env, out)
_compile(e.e1, env, out)
_compile(e.e2, env, out)
e1type = e.e1.type
if e.op == "+":
if is_collection(e.type):
out.append(binaryop_add_sets if isinstance(e.type, TSet) else binaryop_add_collections)
else:
out.append(binaryop_add_numbers)
elif e.op == "*":
out.append(binaryop_mul)
elif e.op == "-":
if isinstance(e.type, TBag) or isinstance(e.type, TSet):
out.append(binaryop_sub_bags(e.type.t))
elif isinstance(e.type, TList):
out.append(binaryop_sub_lists(e.type.t))
else:
out.append(binaryop_sub)
elif e.op == "==":
out.append(binaryop_eq(e1type))
elif e.op == "===":
out.append(binaryop_eq(e1type, deep=True))
elif e.op == "<":
out.append(binaryop_lt(e1type))
elif e.op == ">":
out.append(binaryop_gt(e1type))
elif e.op == "<=":
out.append(binaryop_le(e1type))
elif e.op == ">=":
out.append(binaryop_ge(e1type))
elif e.op == "!=":
out.append(binaryop_ne(e1type))
elif e.op == BOp.In:
out.append(binaryop_in(e1type))
else:
raise NotImplementedError(e.op)
elif isinstance(e, EListGet):
_compile(e.e, env, out)
_compile(e.index, env, out)
out.append(list_index(mkval(e.type)))
elif isinstance(e, EListSlice):
_compile(e.e, env, out)
_compile(e.start, env, out)
_compile(e.end, env, out)
out.append(list_slice)
elif isinstance(e, EDropFront):
_compile(e.e, env, out)
out.append(drop_front)
elif isinstance(e, EDropBack):
_compile(e.e, env, out)
out.append(drop_back)
elif isinstance(e, EFilter):
_compile(e.e, env, out)
box = [None]
body = []
with extend(env, e.p.arg.id, lambda: box[0]):
_compile(e.p.body, env, body)
def set_arg(v):
def set_arg(stk):
box[0] = v
return set_arg
def maybe_append_to_result(idx):
return lambda stk: (stk[idx].append(box[0]) if stk.pop() else None)
def do_filter(stk):
bag = stk.pop()
res_idx = len(stk)
stk.append([])
ops = []
for (i, val) in enumerate(bag):
ops.append(set_arg(val))
ops.extend(body)
ops.append(maybe_append_to_result(res_idx))
return ops
out.append(do_filter)
out.append(iterable_to_bag)
elif isinstance(e, EMap):
_compile(e.e, env, out)
box = [None]
body = []
with extend(env, e.f.arg.id, lambda: box[0]):
_compile(e.f.body, env, body)
def set_arg(v):
def set_arg(stk):
box[0] = v
return set_arg
def append_to_result(idx):
return lambda stk: stk[idx].append(stk.pop())
def do_map(stk):
bag = stk.pop()
res_idx = len(stk)
stk.append([])
ops = []
for (i, val) in enumerate(bag):
ops.append(set_arg(val))
ops.extend(body)
ops.append(append_to_result(res_idx))
return ops
out.append(do_map)
out.append(iterable_to_bag)
elif isinstance(e, EFlatMap):
_compile(EMap(e.e, e.f).with_type(TBag(e.type)), env, out)
out.append(do_concat)
elif isinstance(e, EArgMin) or isinstance(e, EArgMax):
# stack layout:
# len | f(best) | best | elem_0 | ... | elem_len
# body is a seq. of opcodes that has the effect of pushing
# f(top_of_stack) onto the stack, leaving the old top underneath
box = [None]
def set_arg(stk):
box[0] = stk[-1]
body = [set_arg]
with extend(env, e.f.arg.id, lambda: box[0]):
_compile(e.f.body, env, body)
keytype = e.f.body.type
def initialize(stk):
bag = stk.pop()
if bag:
stk.extend(reversed(bag))
else:
stk.append(mkval(e.type))
return body + [push(len(bag)-1)]
do_cmp = binaryop_lt(keytype) if isinstance(e, EArgMin) else binaryop_gt(keytype)
def loop(stk):
len = stk.pop()
key = stk.pop()
if len > 0:
best = stk.pop()
return body + [dup, push(key), do_cmp, if_then_else(
[],
[drop, drop, push(best), push(key)]), push(len-1), loop]
_compile(e.e, env, out)
out.append(initialize)
out.append(loop)
elif isinstance(e, EMakeMap2):
_compile(EMap(e.e, ELambda(e.value.arg, ETuple((e.value.arg, e.value.body)).with_type(TTuple((e.value.arg.type, e.value.body.type))))).with_type(TBag(TTuple((e.value.arg.type, e.value.body.type)))), env, out)
default = mkval(e.type.v)
def make_map(stk):
res = Map(e.type, default)
for (k, v) in reversed(list(stk.pop())):
res[k] = v
stk.append(res)
out.append(make_map)
elif isinstance(e, EMapGet):
_compile(e.map, env, out)
_compile(e.key, env, out)
out.append(read_map)
elif isinstance(e, EHasKey):
_compile(e.map, env, out)
_compile(e.key, env, out)
out.append(has_key(e.key.type))
elif isinstance(e, EMapKeys):
_compile(e.e, env, out)
out.append(read_map_keys)
elif isinstance(e, ECall):
_compile(EVar(e.func), env, out)
for a in e.args:
_compile(a, env, out)
n = len(e.args)
def call(stk):
args = reversed([stk.pop() for i in range(n)])
f = stk.pop()
stk.append(f(*args))
out.append(call)
elif isinstance(e, ELet):
_compile(e.e, env, out)
box = [None]
def set_arg(v):
def set_arg(stk):
box[0] = v
return set_arg
def do_bind(stk):
return [set_arg(stk.pop())]
out.append(do_bind)
with extend(env, e.f.arg.id, lambda: box[0]):
_compile(e.f.body, env, out)
else:
h = extension_handler(type(e))
if h is not None:
_compile(h.encode(e), env, out)
else:
raise NotImplementedError(type(e))
if hasattr(e, "type") and isinstance(e.type, TList):
out.append(iterable_to_list)
| 30.468966 | 216 | 0.568493 |
79469db16c1ae0ea106551d11ce41b87aa329afc | 1,219 | py | Python | benchmarks/comprehension.py | wangwansan/grumpy | aff3b3ff62a34c5baa9783307764f640df021337 | [
"Apache-2.0"
] | 11,252 | 2017-01-04T16:19:12.000Z | 2022-03-31T13:42:31.000Z | benchmarks/comprehension.py | wangwansan/grumpy | aff3b3ff62a34c5baa9783307764f640df021337 | [
"Apache-2.0"
] | 301 | 2017-01-04T17:34:00.000Z | 2022-03-15T21:40:21.000Z | benchmarks/comprehension.py | wangwansan/grumpy | aff3b3ff62a34c5baa9783307764f640df021337 | [
"Apache-2.0"
] | 819 | 2017-01-04T17:26:26.000Z | 2022-03-20T14:11:28.000Z | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmarks for comprehensions."""
# pylint: disable=unused-argument
import weetest
def BenchmarkGeneratorExpCreate(b):
l = []
for _ in xrange(b.N):
(x for x in l) # pylint: disable=pointless-statement
def BenchmarkGeneratorExpIterate(b):
for _ in (x for x in xrange(b.N)):
pass
def BenchmarkListCompCreate(b):
for _ in xrange(b.N):
[x for x in xrange(1000)] # pylint: disable=expression-not-assigned
def BenchmarkDictCompCreate(b):
for _ in xrange(b.N):
{x: x for x in xrange(1000)} # pylint: disable=expression-not-assigned
if __name__ == '__main__':
weetest.RunBenchmarks()
| 27.088889 | 75 | 0.727646 |
79469de5422df8b871a2ff04e92c210bc18fff0c | 5,051 | py | Python | magrathea/core/feed/entry.py | RootForum/magrathea | 85bf611c749969225b079d2177834a86a9905ef0 | [
"MIT"
] | 1 | 2017-07-22T13:07:05.000Z | 2017-07-22T13:07:05.000Z | magrathea/core/feed/entry.py | RootForum/magrathea | 85bf611c749969225b079d2177834a86a9905ef0 | [
"MIT"
] | null | null | null | magrathea/core/feed/entry.py | RootForum/magrathea | 85bf611c749969225b079d2177834a86a9905ef0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
magrathea.core.feed.entry
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2014 by the RootForum.org team, see AUTHORS.
:license: MIT License, see LICENSE for details.
"""
import base64
import calendar
import time
from ...utils.convert import to_str, to_bytes
from .info import FeedInfo
def get_entry_id(entry):
"""
Retrieve the unique identifier of a :py:mod:`feedparser` entry object.
    Magrathea uses this internally for identifying entry objects.
:param entry: :py:mod:`feedparser` entry object
"""
if hasattr(entry, 'id'):
return base64.b64encode(to_bytes(entry.id))
if hasattr(entry, 'link'):
return base64.b64encode(to_bytes(entry.link))
return None
class Entry(object):
"""
Class representing a feed entry. To ease sorting of entries,
each entry offers a sort key (``key`` property) constructed
from its update date. If the feed does not provide the updated
    date, the publish date or the creation date is used.
:param entry: A :py:mod:`feedparser` entry object
"""
def __init__(self, entry):
self._id = get_entry_id(entry)
self._key = None
self._updated = None
self._expired = None
self._link = None
self._content = None
self._description = None
self._title = None
self._author = None
self._feed = None
self._parse_entry(entry)
def update(self, entry):
"""
Update feed entry with new information.
:param entry: A :py:mod:`feedparser` entry object
"""
self._parse_entry(entry)
def _parse_entry(self, entry):
if hasattr(entry, 'updated_parsed'):
self._updated = entry.updated_parsed
if hasattr(entry, 'published_parsed') and not self._updated:
self._updated = entry.published_parsed
if hasattr(entry, 'created_parsed') and not self._updated:
self._updated = entry.created_parsed
        if hasattr(entry, 'expired_parsed'):
            self._expired = entry.expired_parsed
if hasattr(entry, 'link'):
self._link = entry.link
if hasattr(entry, 'content'):
self._content = []
for element in entry.content:
self._content.append(element.value)
if hasattr(entry, 'description'):
self._description = entry.description
if hasattr(entry, 'title'):
self._title = entry.title
if hasattr(entry, 'author'):
self._author = entry.author
if self._updated:
self._key = time.strftime('%Y%m%d%H%M%S', self._updated)
@property
def id(self):
"""
Unique identifier of the entry
"""
return self._id
@property
def key(self):
"""
Time-based sorting key
"""
return self._key
@property
def body(self):
"""
Content body of the entry
"""
if self._content:
# noinspection PyTypeChecker
return " ".join(to_str(self._content))
if self._description:
return to_str(self._description)
return ""
@property
def title(self):
"""
Title of the entry
"""
return to_str(self._title)
@property
def pubdate_gmt(self):
"""
Date when the entry was last updated, published or otherwise changed in GMT
"""
return self._updated
@property
def pubdate_local(self):
"""
        Date when the entry was last updated, published or otherwise changed, converted to local time
"""
return time.localtime(calendar.timegm(self._updated))
@property
def author(self):
"""
Author of the entry
"""
return to_str(self._author)
@property
def feed(self):
"""
Feed the entry comes from.
Available sub-attributes: :py:attr:`~magrathea.core.feed.feed.FeedInfo.author`,
:py:attr:`~magrathea.core.feed.feed.FeedInfo.title`, :py:attr:`~magrathea.core.feed.feed.FeedInfo.uri` and
:py:attr:`~magrathea.core.feed.feed.FeedInfo.type`.
"""
return self._feed
@feed.setter
def feed(self, feed):
if isinstance(feed, FeedInfo):
self._feed = feed
def get_pubdate_gmt(self, format):
"""
Get the :py:attr:`~magrathea.core.feed.entry.Entry.pubdate_gmt` (GMT) formatted via :py:func:`time.strftime`.
:param str format: format string understood by :py:func:`time.strftime`
"""
return time.strftime(format, self._updated)
def get_pubdate_local(self, format):
"""
Get the :py:attr:`~magrathea.core.feed.entry.Entry.pubdate_local` (local) formatted via
:py:func:`time.strftime`.
:param str format: format string understood by :py:func:`time.strftime`
"""
return time.strftime(format, time.localtime(calendar.timegm(self._updated)))
| 29.538012 | 117 | 0.601663 |
79469e718645f32631873f3f1276349a6a6c7b8c | 423 | py | Python | setup.py | ljwolf/seatsvotes | 6d44bba02016cc7ac24cebf6e0d70e1e9e801a5b | [
"MIT"
] | null | null | null | setup.py | ljwolf/seatsvotes | 6d44bba02016cc7ac24cebf6e0d70e1e9e801a5b | [
"MIT"
] | null | null | null | setup.py | ljwolf/seatsvotes | 6d44bba02016cc7ac24cebf6e0d70e1e9e801a5b | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name='seatsvotes',
version='0.0.1',
description='tools to conduct seats votes modeling',
url='https://github.com/ljwolf/seatsvotes',
author='Levi John Wolf',
author_email='[email protected]',
license='3-Clause BSD',
packages=['seatsvotes'],
install_requires=['pandas', 'pysal', 'statsmodels', 'scikit-learn'],
zip_safe=False)
| 32.538462 | 74 | 0.654846 |
79469e7655b1bc90e3a75ea2112b6d26f0962412 | 1,450 | py | Python | paasta_tools/list_tron_namespaces.py | sofyat/paasta | 6765ed992ec224bf78741ffcd615a6a6c6ffb780 | [
"Apache-2.0"
] | null | null | null | paasta_tools/list_tron_namespaces.py | sofyat/paasta | 6765ed992ec224bf78741ffcd615a6a6c6ffb780 | [
"Apache-2.0"
] | null | null | null | paasta_tools/list_tron_namespaces.py | sofyat/paasta | 6765ed992ec224bf78741ffcd615a6a6c6ffb780 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2015-2018 Yelp Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from paasta_tools import tron_tools
from paasta_tools.utils import paasta_print
def parse_args():
parser = argparse.ArgumentParser(
description="Lists Tron namespaces for a cluster, excluding MASTER"
)
parser.add_argument(
"-c",
"--cluster",
dest="cluster",
default=None,
help="Use a different Tron cluster",
)
parser.add_argument(
"-d",
"--soa-dir",
dest="soa_dir",
default=tron_tools.DEFAULT_SOA_DIR,
help="Use a different soa config directory",
)
args = parser.parse_args()
return args
def main():
args = parse_args()
namespaces = tron_tools.get_tron_namespaces(
cluster=args.cluster, soa_dir=args.soa_dir
)
paasta_print("\n".join(namespaces))
if __name__ == "__main__":
main()
| 27.884615 | 75 | 0.683448 |
79469ff3c4a8d013fce0bb56c6dee8041dee039b | 7,848 | py | Python | bokeh/themes/theme.py | dkapitan/bokeh | d518cecd1d9919db49e3c0033e8c1b89db9965bf | [
"BSD-3-Clause"
] | 1 | 2020-02-07T16:57:56.000Z | 2020-02-07T16:57:56.000Z | bokeh/themes/theme.py | dkapitan/bokeh | d518cecd1d9919db49e3c0033e8c1b89db9965bf | [
"BSD-3-Clause"
] | 1 | 2021-05-11T23:19:27.000Z | 2021-05-11T23:19:27.000Z | bokeh/themes/theme.py | dkapitan/bokeh | d518cecd1d9919db49e3c0033e8c1b89db9965bf | [
"BSD-3-Clause"
] | 1 | 2020-03-06T07:38:50.000Z | 2020-03-06T07:38:50.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a ``Theme`` class for specifying new default values for Bokeh
:class:`~bokeh.model.Model` properties.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
import yaml
# Bokeh imports
from ..core.has_props import HasProps
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# whenever we cache that there's nothing themed for a class, we
# use this same dict instance, so we don't have a zillion empty
# dicts in our caches.
_empty_dict = dict()
__all__ = (
'Theme',
)
#-----------------------------------------------------------------------------
# General API
#----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
# Note: in DirectoryHandler and in general we assume this is an
# immutable object, because we share it among sessions and we
# don't monitor it for changes. If you make this mutable by adding
# any kind of setter, you could have to refactor some other code.
class Theme(object):
''' Provide new default values for Bokeh models.
Bokeh Model properties all have some built-in default value. If a property
has not been explicitly set (e.g. ``m.foo = 10``) then accessing the
    property will return the default value. It may be useful for users to be
able to specify a different set of default values than the built-in
default. The ``Theme`` class allows collections of custom default values
to be easily applied to Bokeh documents.
The ``Theme`` class can be constructed either from a YAML file or from a
JSON dict (but not both). Examples of both formats are shown below.
The plotting API's defaults override some theme properties. Namely:
`fill_alpha`, `fill_color`, `line_alpha`, `line_color`, `text_alpha` and
`text_color`. Those properties should therefore be set explicitly when
using the plotting API.
Args:
filename (str, optional) : path to a YAML theme file
json (str, optional) : a JSON dictionary specifying theme values
Raises:
ValueError
If neither ``filename`` or ``json`` is supplied.
Examples:
Themes are specified by providing a top-level key ``attrs`` which
has blocks for Model types to be themed. Each block has keys and
values that specify the new property defaults for that type.
Here is an example theme in YAML format that sets various visual
properties for all figures, grids, and titles:
.. code-block:: yaml
attrs:
Figure:
background_fill_color: '#2F2F2F'
border_fill_color: '#2F2F2F'
outline_line_color: '#444444'
Grid:
grid_line_dash: [6, 4]
grid_line_alpha: .3
Title:
text_color: "white"
Here is the same theme, in JSON format:
.. code-block:: python
{
'attrs' : {
'Figure' : {
'background_fill_color': '#2F2F2F',
'border_fill_color': '#2F2F2F',
'outline_line_color': '#444444',
},
'Grid': {
'grid_line_dash': [6, 4]',
'grid_line_alpha': .3,
},
'Title': {
'text_color': 'white'
}
}
'''
def __init__(self, filename=None, json=None):
if (filename is not None) and (json is not None):
raise ValueError("Theme should be constructed from a file or from json not both")
if filename is not None:
with open(filename) as f:
json = yaml.safe_load(f)
# empty docs result in None rather than {}, fix it.
if json is None:
json = {}
if json is None:
raise ValueError("Theme requires json or a filename to construct")
self._json = json
if 'attrs' not in self._json:
self._json['attrs'] = {}
if not isinstance(self._json['attrs'], dict):
raise ValueError("theme problem: attrs field should be a dictionary of class names, not %r" % (self._json['attrs']))
for key, value in self._json['attrs'].items():
if not isinstance(value, dict):
raise ValueError("theme problem: attrs.%s should be a dictionary of properties, not %r" % (key, value))
self._line_defaults = self._json.get('line_defaults', _empty_dict)
self._fill_defaults = self._json.get('fill_defaults', _empty_dict)
self._text_defaults = self._json.get('text_defaults', _empty_dict)
# mapping from class name to the full set of properties
# (including those merged in from base classes) for that
# class.
self._by_class_cache = {}
def _add_glyph_defaults(self, cls, props):
from ..models.glyphs import Glyph
if issubclass(cls, Glyph):
if hasattr(cls, "line_alpha"):
props.update(self._line_defaults)
if hasattr(cls, "fill_alpha"):
props.update(self._fill_defaults)
if hasattr(cls, "text_alpha"):
props.update(self._text_defaults)
def _for_class(self, cls):
if cls.__name__ not in self._by_class_cache:
attrs = self._json['attrs']
combined = {}
# we go in reverse order so that subclass props override base class
for base in cls.__mro__[-2::-1]:
if not issubclass(base, HasProps):
continue
self._add_glyph_defaults(base, combined)
combined.update(attrs.get(base.__name__, _empty_dict))
if len(combined) == 0:
combined = _empty_dict
self._by_class_cache[cls.__name__] = combined
return self._by_class_cache[cls.__name__]
def apply_to_model(self, model):
''' Apply this theme to a model.
.. warning::
Typically, don't call this method directly. Instead, set the theme
on the :class:`~bokeh.document.Document` the model is a part of.
'''
model.apply_theme(self._for_class(model.__class__))
# a little paranoia because it would be Bad(tm) to mess
# this up... would be nicer if python had a way to freeze
# the dict.
if len(_empty_dict) > 0:
raise RuntimeError("Somebody put stuff in _empty_dict")
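# Illustrative usage sketch (added; not part of the original module). The usual
# entry point is to set a theme on a Document rather than calling
# apply_to_model() directly:
#
#     from bokeh.document import Document
#     theme = Theme(json={'attrs': {'Grid': {'grid_line_alpha': 0.3}}})
#     doc = Document()
#     doc.theme = theme  # models added to doc now pick up the themed defaults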
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#----------------------------------------------------------------------------
| 38.470588 | 128 | 0.507008 |
7946a07e6a738a4866b87072b28c6c3e9b2fdb77 | 1,414 | py | Python | ingenico/direct/sdk/domain/fixed_list_validator.py | Ingenico/direct-sdk-python3 | d2b30b8e8afb307153a1f19ac4c054d5344449ce | [
"Apache-2.0"
] | null | null | null | ingenico/direct/sdk/domain/fixed_list_validator.py | Ingenico/direct-sdk-python3 | d2b30b8e8afb307153a1f19ac4c054d5344449ce | [
"Apache-2.0"
] | 1 | 2021-03-30T12:55:39.000Z | 2021-04-08T08:23:27.000Z | ingenico/direct/sdk/domain/fixed_list_validator.py | Ingenico/direct-sdk-python3 | d2b30b8e8afb307153a1f19ac4c054d5344449ce | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://support.direct.ingenico.com/documentation/api/reference/
#
from typing import List
from ingenico.direct.sdk.data_object import DataObject
class FixedListValidator(DataObject):
__allowed_values = None
@property
def allowed_values(self) -> List[str]:
"""
Type: list[str]
"""
return self.__allowed_values
@allowed_values.setter
def allowed_values(self, value: List[str]):
self.__allowed_values = value
def to_dictionary(self):
dictionary = super(FixedListValidator, self).to_dictionary()
if self.allowed_values is not None:
dictionary['allowedValues'] = []
for element in self.allowed_values:
if element is not None:
dictionary['allowedValues'].append(element)
return dictionary
def from_dictionary(self, dictionary):
super(FixedListValidator, self).from_dictionary(dictionary)
if 'allowedValues' in dictionary:
if not isinstance(dictionary['allowedValues'], list):
raise TypeError('value \'{}\' is not a list'.format(dictionary['allowedValues']))
self.allowed_values = []
for element in dictionary['allowedValues']:
self.allowed_values.append(element)
return self
| 32.883721 | 97 | 0.6471 |
7946a0cf105413db4fce928c4b26a3a9e25dec7f | 5,107 | py | Python | tests/test_utils/system_tests_class.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | [
"Apache-2.0"
] | 5 | 2020-07-17T07:33:58.000Z | 2022-03-02T06:23:47.000Z | tests/test_utils/system_tests_class.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | [
"Apache-2.0"
] | 7 | 2020-06-03T14:55:17.000Z | 2021-12-30T00:01:50.000Z | tests/test_utils/system_tests_class.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | [
"Apache-2.0"
] | 12 | 2020-01-09T14:02:39.000Z | 2022-01-24T07:18:51.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from contextlib import ContextDecorator
from shutil import move
from tempfile import mkdtemp
from unittest import SkipTest, TestCase
from airflow import AirflowException, models
from airflow.configuration import AIRFLOW_HOME, AirflowConfigParser, get_airflow_config
from airflow.utils import db
from airflow.utils.log.logging_mixin import LoggingMixin
AIRFLOW_MAIN_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
DEFAULT_DAG_FOLDER = os.path.join(AIRFLOW_MAIN_FOLDER, "airflow", "example_dags")
SKIP_SYSTEM_TEST_WARNING = """Skipping system test.
To allow system tests, set ENABLE_SYSTEM_TESTS=true.
"""
def resolve_dags_folder() -> str:
"""
Returns DAG folder specified in current Airflow config.
"""
config_file = get_airflow_config(AIRFLOW_HOME)
conf = AirflowConfigParser()
conf.read(config_file)
try:
dags = conf.get("core", "dags_folder")
except AirflowException:
dags = os.path.join(AIRFLOW_HOME, 'dags')
return dags
class empty_dags_directory( # pylint: disable=invalid-name
ContextDecorator, LoggingMixin
):
"""
    Context manager that temporarily removes DAGs from the provided directory.
"""
def __init__(self, dag_directory: str) -> None:
super().__init__()
self.dag_directory = dag_directory
self.temp_dir = mkdtemp()
def __enter__(self) -> str:
self._store_dags_to_temporary_directory(self.dag_directory, self.temp_dir)
return self.temp_dir
def __exit__(self, *args, **kwargs) -> None:
self._restore_dags_from_temporary_directory(self.dag_directory, self.temp_dir)
def _store_dags_to_temporary_directory(
self, dag_folder: str, temp_dir: str
) -> None:
self.log.info(
"Storing DAGS from %s to temporary directory %s", dag_folder, temp_dir
)
try:
os.mkdir(dag_folder)
except OSError:
pass
for file in os.listdir(dag_folder):
move(os.path.join(dag_folder, file), os.path.join(temp_dir, file))
def _restore_dags_from_temporary_directory(
self, dag_folder: str, temp_dir: str
) -> None:
self.log.info(
"Restoring DAGS to %s from temporary directory %s", dag_folder, temp_dir
)
for file in os.listdir(temp_dir):
move(os.path.join(temp_dir, file), os.path.join(dag_folder, file))
class SystemTest(TestCase, LoggingMixin):
def run(self, result=None):
if os.environ.get('ENABLE_SYSTEM_TESTS') != 'true':
raise SkipTest(SKIP_SYSTEM_TEST_WARNING)
return super().run(result)
def setUp(self) -> None:
"""
        We want to avoid random errors while the database is reset - those
        are apparently triggered by the parser trying to parse DAGs while
        the tables are dropped. We move the DAGs temporarily out of the dags folder
        and move them back after the reset.
"""
dag_folder = resolve_dags_folder()
with empty_dags_directory(dag_folder):
db.resetdb()
super().setUp()
def run_dag(self, dag_id: str, dag_folder: str = DEFAULT_DAG_FOLDER) -> None:
"""
        Runs an example DAG by its ID.
        :param dag_id: ID of the DAG to be run
        :type dag_id: str
        :param dag_folder: directory in which to look for the DAG, relative to AIRFLOW_HOME
:type dag_folder: str
"""
self.log.info("Looking for DAG: %s in %s", dag_id, dag_folder)
dag_bag = models.DagBag(dag_folder=dag_folder, include_examples=False)
dag = dag_bag.get_dag(dag_id)
if dag is None:
raise AirflowException(
"The Dag {dag_id} could not be found. It's either an import problem,"
"wrong dag_id or DAG is not in provided dag_folder."
"The content of the {dag_folder} folder is {content}".format(
dag_id=dag_id,
dag_folder=dag_folder,
content=os.listdir(dag_folder),
)
)
self.log.info("Attempting to run DAG: %s", dag_id)
dag.clear(reset_dag_runs=True)
dag.run(ignore_first_depends_on_past=True, verbose=True)
| 36.478571 | 98 | 0.673389 |
7946a0d1aad524bd14e83eb3724ee4288fde68ec | 1,390 | py | Python | memory.py | ppeigne/DQN-reimplementation | ce5e11f9b6c7c0dc42f9a65fee872a70ac5c6415 | [
"MIT"
] | null | null | null | memory.py | ppeigne/DQN-reimplementation | ce5e11f9b6c7c0dc42f9a65fee872a70ac5c6415 | [
"MIT"
] | null | null | null | memory.py | ppeigne/DQN-reimplementation | ce5e11f9b6c7c0dc42f9a65fee872a70ac5c6415 | [
"MIT"
] | null | null | null | import numpy as np
import torch as T
from typing import Tuple
class Memory():
    def __init__(self, size: int, state_shape: Tuple[int, ...]) -> None:
self.size = size
self.current_states = T.zeros((size, *state_shape), dtype=T.float32)
self.actions = T.zeros(size, dtype=T.int64)
self.rewards = T.zeros(size, dtype=T.float32)
self.next_states = T.zeros((size, *state_shape), dtype=T.float32)
self.dones = T.zeros(size, dtype=T.bool)
self.idx_last = 0
def collect_experience(self, current_state, action, reward, next_state, done) -> None:
index = self.idx_last % self.size
self.current_states[index] = T.tensor(current_state)
self.actions[index] = T.tensor(action)
self.rewards[index] = T.tensor(reward)
self.next_states[index] = T.tensor(next_state)
self.dones[index] = T.tensor(done)
self.idx_last += 1
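    # Added note: the buffer is a fixed-size ring. collect_experience wraps its
    # write index with modulo, so once `size` experiences have been stored the
    # oldest slots are overwritten, and get_sample below draws without
    # replacement only from slots that have actually been filled.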
def get_sample(self, sample_size: int) -> Tuple[T.Tensor, ...]:
        past_frontier = min(self.size, self.idx_last)
        if sample_size > past_frontier:
            raise ValueError("sample_size exceeds the number of stored experiences")
sample_idxs = np.random.choice(past_frontier, size=sample_size, replace=False)
return (self.current_states[sample_idxs], self.actions[sample_idxs], self.rewards[sample_idxs],
self.next_states[sample_idxs], self.dones[sample_idxs]) | 40.882353 | 103 | 0.651079 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.