id (string, lengths 1-7) | text (string, lengths 6-1.03M) | dataset_id (stringclass, 1 value)
---|---|---
1693443
|
from PyQt5 import QtCore
def load_style_sheet():
"""
Loads style.qss content.
:return: style sheet for :class:`main.MainWindow`
:rtype: str
"""
style = QtCore.QFile('style.qss')
if not style.exists():
return
else:
style.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text)
text = QtCore.QTextStream(style)
style_sheet = text.readAll()
return style_sheet
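# Illustrative usage sketch (assumes a style.qss file in the working directory
# and that the style sheet is applied to the whole application):
if __name__ == '__main__':
    import sys
    from PyQt5 import QtWidgets
    app = QtWidgets.QApplication(sys.argv)
    qss = load_style_sheet()
    if qss:
        app.setStyleSheet(qss)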
|
StarcoderdataPython
|
1763855
|
from nzpy.core import (ArrayContentNotHomogenousError,
ArrayContentNotSupportedError,
ArrayDimensionsNotConsistentError, BINARY,
Binary, Connection, Cursor, DataError,
DatabaseError, Date, DateFromTicks, Error,
IntegrityError, InterfaceError, InternalError,
Interval, LogOptions, NotSupportedError,
OperationalError, PGEnum, PGJson, PGJsonb,
PGText, PGTsvector, PGVarchar, ProgrammingError,
Time, TimeFromTicks, Timestamp, TimestampFromTicks,
Warning)
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
# Copyright (c) 2007-2009, <NAME>
# Copyright (c) The Contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "<NAME>"
def connect(user, host='localhost', unix_sock=None, port=5432, database=None,
password=<PASSWORD>, ssl=None, securityLevel=0, timeout=None,
application_name=None, max_prepared_statements=1000,
datestyle='ISO', logLevel=0, tcp_keepalive=True,
char_varchar_encoding='latin', logOptions=LogOptions.Inherit,
pgOptions=None):
return Connection(user, host, unix_sock, port, database, password, ssl,
securityLevel, timeout, application_name,
max_prepared_statements, datestyle, logLevel,
tcp_keepalive, char_varchar_encoding,
logOptions, pgOptions)
apilevel = "2.0"
"""The DBAPI level supported, currently "2.0".
This property is part of the `DBAPI 2.0 specification
<http://www.python.org/dev/peps/pep-0249/>`_.
"""
threadsafety = 1
"""Integer constant stating the level of thread safety the DBAPI interface
supports. This DBAPI module supports sharing of the module only. Connections
and cursors may not be shared between threads. This gives nzpy a threadsafety
value of 1.
This property is part of the `DBAPI 2.0 specification
<http://www.python.org/dev/peps/pep-0249/>`_.
"""
paramstyle = 'qmark'
max_prepared_statements = 1000
# I have no idea what this would be used for by a client app. Should it be
# TEXT, VARCHAR, CHAR? It will only compare against row_description's
# type_code if it is this one type. It is the varchar type oid for now, this
# appears to match expectations in the DB API 2.0 compliance test suite.
STRING = 1043
"""String type oid."""
NUMBER = 1700
"""Numeric type oid"""
DATETIME = 1114
"""Timestamp type oid"""
ROWID = 26
"""ROWID type oid"""
__all__ = [
Warning, DataError, DatabaseError, connect, InterfaceError,
ProgrammingError, Error, OperationalError, IntegrityError, InternalError,
NotSupportedError, ArrayContentNotHomogenousError,
ArrayDimensionsNotConsistentError, ArrayContentNotSupportedError,
Connection, Cursor, Binary, Date, DateFromTicks, Time, TimeFromTicks,
Timestamp, TimestampFromTicks, BINARY, Interval, PGEnum, PGJson, PGJsonb,
PGTsvector, PGText, PGVarchar]
"""Version string for nzpy.
.. versionadded:: 1.9.11
"""
|
StarcoderdataPython
|
150839
|
<reponame>hafeez3000/taurus
import os
from collections import Counter
import time
from tests import BZTestCase, random_datapoint
from tests.mocks import EngineEmul
from bzt.modules.blazemeter import BlazeMeterUploader, CloudProvisioning
from bzt.modules.reporting import FinalStatus
from bzt.utils import BetterDict
from bzt.modules.aggregator import DataPoint, KPISet
from bzt.modules.functional import ResultsTree, FunctionalSample
class TestFinalStatusReporter(BZTestCase):
def test_log_messages_failed_labels(self):
obj = FinalStatus()
obj.engine = EngineEmul()
obj.parameters = BetterDict()
self.sniff_log(obj.log)
obj.parameters.merge({"failed-labels": True, "percentiles": False, "summary": False, "test-duration": False})
obj.startup()
obj.shutdown()
obj.aggregated_second(self.__get_datapoint())
obj.post_process()
self.assertIn("29656 failed samples: http://192.168.1.1/anotherquery\n", self.log_recorder.info_buff.getvalue())
def test_log_messages_percentiles(self):
obj = FinalStatus()
obj.engine = EngineEmul()
obj.parameters = BetterDict()
self.sniff_log(obj.log)
obj.parameters.merge({"failed-labels": False, "percentiles": True, "summary": False, "test-duration": False})
obj.startup()
obj.shutdown()
obj.aggregated_second(self.__get_datapoint())
obj.post_process()
target_output = ("Average times: total 0.001, latency 0.000, connect 0.000\n"
"Percentile 0.0%: 0.000\n"
"Percentile 50.0%: 0.000\n"
"Percentile 90.0%: 0.001\n"
"Percentile 95.0%: 0.001\n"
"Percentile 99.0%: 0.003\n"
"Percentile 99.9%: 0.008\n"
"Percentile 100.0%: 0.081\n"
)
self.assertEqual(target_output, self.log_recorder.info_buff.getvalue())
def test_log_messages_samples_count(self):
obj = FinalStatus()
obj.engine = EngineEmul()
obj.parameters = BetterDict()
self.sniff_log(obj.log)
obj.parameters.merge({"failed-labels": False, "percentiles": False, "summary": True, "test-duration": False})
obj.aggregated_second(self.__get_datapoint())
obj.startup()
obj.shutdown()
obj.post_process()
self.assertEqual("Samples count: 59314, 50.00% failures\n", self.log_recorder.info_buff.getvalue())
def test_log_messages_duration(self):
"""
Test duration report
:return:
"""
obj = FinalStatus()
obj.engine = EngineEmul()
obj.parameters = BetterDict()
self.sniff_log(obj.log)
obj.prepare()
obj.startup()
obj.shutdown()
obj.start_time -= 120005
obj.post_process()
self.assertEqual("Test duration: 1 day, 9:20:05\n", self.log_recorder.info_buff.getvalue())
def test_dump(self):
obj = FinalStatus()
obj.engine = EngineEmul()
obj.parameters = BetterDict()
self.sniff_log(obj.log)
obj.parameters.merge({
"dump-xml": obj.engine.create_artifact("status", ".xml"),
"dump-csv": obj.engine.create_artifact("status", ".csv")
})
obj.aggregated_second(random_datapoint(time.time()))
obj.startup()
obj.shutdown()
obj.post_process()
self.assertIn("XML", self.log_recorder.info_buff.getvalue())
def test_func_report(self):
obj = FinalStatus()
obj.engine = EngineEmul()
obj.parameters = BetterDict()
self.sniff_log(obj.log)
obj.prepare()
obj.startup()
obj.shutdown()
obj.aggregated_results(*self.__get_func_tree())
obj.post_process()
info_log = self.log_recorder.info_buff.getvalue()
warn_log = self.log_recorder.warn_buff.getvalue()
self.assertIn("Total: 3 tests", info_log)
self.assertIn("Test TestClass.case2 failed: something broke", warn_log)
self.assertIn("stacktrace2", warn_log)
self.assertIn("Test TestClass.case3 failed: something is badly broken", warn_log)
self.assertIn("stacktrace3", warn_log)
def test_func_report_all_no_stacktrace(self):
obj = FinalStatus()
obj.engine = EngineEmul()
obj.parameters = BetterDict()
self.sniff_log(obj.log)
obj.parameters.merge({"report-tests": "all", "print-stacktrace": False})
obj.prepare()
obj.startup()
obj.shutdown()
obj.aggregated_results(*self.__get_func_tree())
obj.post_process()
info_log = self.log_recorder.info_buff.getvalue()
self.assertIn("Total: 3 tests", info_log)
self.assertIn("Test TestClass.case1 - PASSED", info_log)
self.assertIn("Test TestClass.case2 - FAILED", info_log)
self.assertIn("Test TestClass.case3 - BROKEN", info_log)
self.assertNotIn("stacktrace2", info_log)
self.assertNotIn("stacktrace3", info_log)
def __get_datapoint(self, ts=0):
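# Build a synthetic DataPoint: cumulative KPISets for the overall run ("") plus
# three URL labels, with values matching the assertions in the tests above.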
datapoint = DataPoint(ts, None)
cumul_data = datapoint[DataPoint.CUMULATIVE]
cumul_data[""] = KPISet.from_dict(
{KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
KPISet.RESP_TIMES: Counter(
{0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
0.019: 1, 0.015: 1}),
KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
'urls': Counter({'http://192.168.1.1/anotherquery': 7373}), KPISet.RESP_CODES: '403'}],
KPISet.STDEV_RESP_TIME: 0.04947974228872108,
KPISet.AVG_LATENCY: 0.0002825639815220692,
KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
'100.0': 0.081, '99.0': 0.003, '50.0': 0.0},
KPISet.SUCCESSES: 29658,
KPISet.SAMPLE_COUNT: 59314,
KPISet.CONCURRENCY: 0,
KPISet.AVG_RESP_TIME: 0.0005440536804127192,
KPISet.FAILURES: 29656})
cumul_data["http://192.168.1.1/somequery"] = KPISet.from_dict(
{KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
KPISet.RESP_TIMES: Counter(
{0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341,
0.004: 121,
0.005: 66, 0.006: 36, 0.007: 33, 0.008: 18,
0.009: 12, 0.011: 6,
0.01: 5, 0.013: 2, 0.017: 2, 0.012: 2, 0.079: 1,
0.016: 1,
0.014: 1, 0.019: 1, 0.04: 1, 0.081: 1}),
KPISet.ERRORS: [],
KPISet.STDEV_RESP_TIME: 0.04073402130687656,
KPISet.AVG_LATENCY: 1.7196034796682178e-06,
KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0,
'99.9': 0.009,
'90.0': 0.001,
'100.0': 0.081,
'99.0': 0.004,
'50.0': 0.0},
KPISet.SUCCESSES: 29658,
KPISet.SAMPLE_COUNT: 29658,
KPISet.CONCURRENCY: 0,
KPISet.AVG_RESP_TIME: 0.0005164542450603551, KPISet.FAILURES: 0})
cumul_data["http://192.168.1.1/anotherquery"] = KPISet.from_dict(
{KPISet.AVG_CONN_TIME: 6.1707580253574335e-06,
KPISet.RESP_TIMES: Counter({0.0: 14941, 0.001: 13673, 0.002: 506,
0.003: 289, 0.004: 103,
0.005: 59, 0.006: 37, 0.008: 14,
0.007: 13, 0.009: 8, 0.01: 3,
0.011: 2, 0.016: 2, 0.014: 2,
0.017: 1, 0.013: 1, 0.015: 1,
0.04: 1}),
KPISet.ERRORS: [
{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
'urls': Counter(
{'http://192.168.1.1/anotherquery': 7373}),
KPISet.RESP_CODES: '403'}],
KPISet.STDEV_RESP_TIME: 0.032465137860758844,
KPISet.AVG_LATENCY: 0.0005634272997032645,
KPISet.RESP_CODES: Counter({'403': 29656}),
KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0,
'99.9': 0.008, '90.0': 0.001,
'100.0': 0.04, '99.0': 0.003,
'50.0': 0.0},
KPISet.SUCCESSES: 0,
KPISet.SAMPLE_COUNT: 29656,
KPISet.CONCURRENCY: 0,
KPISet.AVG_RESP_TIME: 0.0005716549770704078,
KPISet.FAILURES: 29656})
cumul_data["http://192.168.100.100/somequery"] = KPISet.from_dict(
{KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
KPISet.RESP_TIMES: Counter(
{0.0: 17219, 0.001: 11246, 0.002: 543,
0.003: 341, 0.004: 121,
0.005: 66, 0.006: 36, 0.007: 33, 0.008: 18,
0.009: 12, 0.011: 6,
0.01: 5, 0.013: 2, 0.017: 2, 0.012: 2,
0.079: 1, 0.016: 1,
0.014: 1, 0.019: 1, 0.04: 1, 0.081: 1}),
KPISet.ERRORS: [],
KPISet.STDEV_RESP_TIME: 0.04073402130687656,
KPISet.AVG_LATENCY: 1.7196034796682178e-06,
KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0,
'99.9': 0.009, '90.0': 0.001,
'100.0': 0.081, '99.0': 0.004,
'50.0': 0.0},
KPISet.SUCCESSES: 29658,
KPISet.SAMPLE_COUNT: 29658,
KPISet.CONCURRENCY: 0,
KPISet.AVG_RESP_TIME: 0.0005164542450603551,
KPISet.FAILURES: 0})
return datapoint
def __get_func_tree(self):
tree = ResultsTree()
tree.add_sample(FunctionalSample(test_case="case1", test_suite="TestClass", status="PASSED",
start_time=time.time(), duration=0.12,
error_msg=None, error_trace=None, extras=None, subsamples=[]))
tree.add_sample(FunctionalSample(test_case="case2", test_suite="TestClass", status="FAILED",
start_time=time.time(), duration=0.33,
error_msg="something broke", error_trace="stacktrace2", extras=None,
subsamples=[]))
tree.add_sample(FunctionalSample(test_case="case3", test_suite="TestClass", status="BROKEN",
start_time=time.time(), duration=0.33,
error_msg="something is badly broken", error_trace="stacktrace3", extras=None,
subsamples=[]))
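# aggregated_results() is called with two positional trees (presumably current and
# cumulative results), so the same tree is returned for both.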
return tree, tree
def test_blazemeter_report_link(self):
obj = FinalStatus()
obj.engine = EngineEmul()
obj.parameters = BetterDict()
xml_report = obj.engine.create_artifact("status", ".xml")
obj.parameters.merge({
"dump-xml": xml_report,
})
rep = BlazeMeterUploader()
rep.results_url = "http://report/link"
obj.engine.reporters.append(rep)
obj.startup()
obj.shutdown()
obj.aggregated_second(self.__get_datapoint())
obj.post_process()
self.assertTrue(os.path.exists(xml_report))
with open(xml_report) as fds:
report_content = fds.read()
self.assertIn('<ReportURL>http://report/link</ReportURL>', report_content)
def test_blazemeter_cloud_report_link(self):
obj = FinalStatus()
obj.engine = EngineEmul()
obj.parameters = BetterDict()
xml_report = obj.engine.create_artifact("status", ".xml")
obj.parameters.merge({
"dump-xml": xml_report,
})
prov = CloudProvisioning()
prov.results_url = "http://report/link"
obj.engine.provisioning = prov
obj.startup()
obj.shutdown()
obj.aggregated_second(self.__get_datapoint())
obj.post_process()
self.assertTrue(os.path.exists(xml_report))
with open(xml_report) as fds:
report_content = fds.read()
self.assertIn('<ReportURL>http://report/link</ReportURL>', report_content)
def test_xml_report_test_duration(self):
obj = FinalStatus()
obj.engine = EngineEmul()
obj.parameters = BetterDict()
xml_report = obj.engine.create_artifact("status", ".xml")
obj.parameters.merge({
"dump-xml": xml_report,
})
obj.startup()
obj.aggregated_second(self.__get_datapoint(ts=90))
obj.aggregated_second(self.__get_datapoint(ts=100))
obj.shutdown()
obj.post_process()
self.assertTrue(os.path.exists(xml_report))
with open(xml_report) as fds:
report_content = fds.read()
self.assertIn('<TestDuration>10.0</TestDuration>', report_content)
def test_xml_report_test_duration_failed_prepare(self):
obj = FinalStatus()
obj.engine = EngineEmul()
obj.parameters = BetterDict()
obj.aggregated_second(self.__get_datapoint(ts=100))
obj.post_process() # shouldn't raise ValueError because obj.start_time is None
def test_csv_report_fieldname_order(self):
obj = FinalStatus()
obj.engine = EngineEmul()
obj.parameters = BetterDict()
csv_report = obj.engine.create_artifact("report", ".csv")
obj.parameters.merge({
"dump-csv": csv_report,
})
obj.startup()
obj.aggregated_second(self.__get_datapoint(ts=90))
obj.aggregated_second(self.__get_datapoint(ts=100))
obj.shutdown()
obj.post_process()
self.assertTrue(os.path.exists(csv_report))
with open(csv_report) as fds:
fieldnames = fds.readline().strip().split(",")
perc_fields = [float(name[5:]) for name in fieldnames if name.startswith('perc_')]
self.assertTrue(sorted(perc_fields) == perc_fields)
rc_fields = [float(name[3:]) for name in fieldnames if name.startswith('rc_')]
self.assertTrue(sorted(rc_fields) == rc_fields)
|
StarcoderdataPython
|
161252
|
"""
Reduces long chains of simple conditional if-branches in code by implementing
factory-style routing, similar to what Java Spring achieves through
application-context lifecycle callbacks. See the test under __main__ below for an example.
"""
import functools
from blinker import Signal
def dispatch(func):
"""
Decorator for the entry (dispatching) function.
:param func: the entry function, used as the fallback branch
:return: the decorated function
"""
# routing table
signal_ = Signal()
@functools.wraps(func)
def wrapper(arg0, *args, **kwargs):
"""获取分支方法,获取失败则使用入口方法做兜底"""
if signal_.receivers and signal_.has_receivers_for(arg0):
# hard-coded index: Signal.send returns a list of (receiver, result) pairs, take the first result
return signal_.send(arg0, *args, **kwargs)[0][1]
return func(arg0, *args, **kwargs)
def mapping(key):
def wrap(branch_func):
@signal_.connect_via(key)
def do_branch_func(arg0, *args, **kwargs):
return branch_func(arg0, *args, **kwargs)
return do_branch_func
return wrap
wrapper.mapping = mapping
return wrapper
if __name__ == '__main__':
# pylint: disable = E, W, R, C
@dispatch
def fun(key):
raise ValueError(f'key error, key: {key}')
@fun.mapping(1)
def __fun1(key):
return 1 + key
@fun.mapping(2)
def __fun2(key):
return 2 + key
@fun.mapping(3)
@fun.mapping(4)
def __fun34(key):
return 3 + key
print(f'result:{fun(1)}')
print(f'result:{fun(3)}')
print(f'result:{fun(5)}')
|
StarcoderdataPython
|
4808142
|
<reponame>willyspinner/High-Performance-Face-Recognition<gh_stars>100-1000
import scipy.io as sio
import pickle
import numpy as np
import os
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from scipy import spatial
from sklearn.externals import joblib
import time
import sys
sys.path.append('/home/zhaojian/liblinear/python')
from liblinearutil import *
from scipy import sparse
path = '/media/samsung/learnnet_model_feature/learnnetNovelSet_1/'
files = os.listdir(path)
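# Peek at the first .mat file only: load its "identityFeature" matrix,
# reshape it into rows of 2048-d vectors, and print the resulting shape.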
for file in files:
feature = sio.loadmat(path + file)["identityFeature"]
feature = feature.reshape((-1, 2048))
print feature.shape
break
|
StarcoderdataPython
|
1773211
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: iftikhan
@description: This file holds the logic to report the current status of accounts.
"""
import datetime
import io
import json
import logging
import re
import boto3
import xlwt
from botocore.exceptions import ClientError
from jinja2 import Template
from xlwt.compat import basestring
from constant import Constant
from me_logger import log_error
from util import get_accounts_by_company_name, get_all_accounts
from utils.notification import notify_msg
logger = logging.getLogger(__name__)
logger.setLevel(getattr(logging, Constant.LOG_LEVEL))
def lambda_handler(event, context):
logger.debug(f'Lambda event:{event}')
# In case the user wants to run the report for a particular company
company_name = event.get('CompanyName')
# Pre-signed URL expiry time in seconds (7 days total)
expires_in = 60 * 60 * 24 * 7
try:
if company_name:
accounts = get_accounts_by_company_name(company_name=company_name)
key = f"ae_report_{company_name}_{datetime.datetime.now()}"
else:
accounts = get_all_accounts()
key = f"ae_report_all_accounts_{datetime.datetime.now()}"
if not accounts:
raise Exception("This is no account records in database to report")
else:
# XLS Flow
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet('MigrationEngineReport')
cols_data = [key for key, value in accounts[0].items()]
# Adding headers
for i, field_name in enumerate(cols_data):
worksheet.write(0, i, field_name)
worksheet.col(i).width = 6000
style = xlwt.easyxf('align: wrap yes')
# Adding row data
for row_index, row in enumerate(accounts):
for cell_index, cell_value in enumerate(row.items()):
cell_value = cell_value[1]
if isinstance(cell_value, basestring):
cell_value = re.sub("\r", " ", cell_value)
if not cell_value:
cell_value = None
worksheet.write(row_index + 1, cell_index, cell_value, style)
# uncomment below line if you want to save it in local file system
# workbook.save('output.xls')
# Reading xls data to upload on s3
try:
fp = io.BytesIO()
workbook.save(fp)
fp.seek(0)
data = fp.read()
except IOError as ioe:
logger.error(ioe)
finally:
fp.close()
# Upload the xls data to S3
s3_client = boto3.client('s3')
s3_client.put_object(Body=data, Bucket=Constant.SHARED_RESOURCE_BUCKET,
Key=f"{key}.xls")
# generate pre-signed url
xls_link = s3_client.generate_presigned_url('get_object',
Params={'Bucket': Constant.SHARED_RESOURCE_BUCKET,
'Key': f"{key}.xls"},
ExpiresIn=expires_in)
# HTML Flow
# jinja2 Template
template = Template("<table> "
"{% set glob={'isHeader':true} %}"
"{% for account in accounts %}"
"{% if glob.isHeader %}"
"{% set _ = glob.update({'isHeader':false}) %}"
"<tr style='background: gray;'>"
"{% for key,value in account.items() %}"
"<th > {{ key }} </th>"
"{% endfor %}"
"</tr>"
"{% endif %}"
"<tr>"
"{% for key,value in account.items() %}"
"<td> {{ value }} </td>"
"{% endfor %}"
"</tr>"
"{% endfor %}"
"</table>"
"<style>"
"th {background-color: #4CAF50;color: white;}"
"th, td {padding: 5px;text-align: left;}"
"tr:nth-child(even) {background-color: #f2f2f2;}"
"</style>")
# Generate HTML
report_data = template.render(accounts=accounts)
# Upload HTML data to s3
s3_client.put_object(Body=bytes(report_data, 'utf-8'), Bucket=Constant.SHARED_RESOURCE_BUCKET,
Key=f"{key}.html")
# generate pre-signed url
html_link = s3_client.generate_presigned_url('get_object',
Params={'Bucket': Constant.SHARED_RESOURCE_BUCKET,
'Key': f"{key}.html"},
ExpiresIn=expires_in)
notify_data = {
'SlackHandle': None,
'SlackMessage': {
'attachments': [
{
'color': '#0ec1eb',
'author_name': Constant.AUTHOR_NAME,
'author_icon': Constant.AUTHOR_ICON,
'title': 'Migration Engine Reports',
'text': f"Click <{xls_link}|Report.xls> for XLS report.\n"
f"Click <{html_link}|Report.html> for HTML report.\n"
f"Above reports links will expire after 7 days.",
'footer': Constant.NOTIFICATION_NOTES,
'ts': datetime.datetime.now().timestamp()
}]
}}
notify_msg(Constant.NOTIFICATION_TOPIC, Constant.NOTIFICATION_TITLE, json.dumps(notify_data))
except ClientError as ce:
log_error(logger=logger, account_id=None, company_name=company_name or "All Companies", error=ce,
error_type=Constant.ErrorType.RGE, notify=True)
raise ce
except Exception as ex:
log_error(logger=logger, account_id=None, company_name=company_name or "All Companies",
error_type=Constant.ErrorType.RGE,
notify=True, error=ex)
raise ex
return {'Status': Constant.StateMachineStates.COMPLETED, 'CompanyName': company_name}
lambda_handler({}, None)
|
StarcoderdataPython
|
129919
|
#!/usr/bin/env python
import ConfigParser
import argparse
import cStringIO
import deploy_config
import getpass
import hashlib
import os
import pprint
import re
import service_config
import socket
import string
import subprocess
import sys
import telnetlib
import tempfile
import time
import uuid
from datetime import datetime
from supervisor_client import SupervisorClient
from service_config import ServiceConfig
from tank_client import TankClient
SUPERVISOR_SUCCESS = "OK"
STOPPED_STATUS = ["STOPPED", "BACKOFF", "EXITED", "FATAL"]
HADOOP_PROPERTY_PREFIX = "hadoop.property."
HADOOP_CONF_PATH = "/etc/hadoop/conf"
FAKE_SVN_VERSION = "12345"
class Template(string.Template):
# the original delimiter '$' is also commonly used by shell scripts, so
# override it with '%' here.
delimiter = '%'
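# Illustrative behaviour sketch (hypothetical template string): with '%' as the
# delimiter, "%port" is substituted while shell-style "$HOME" passes through, e.g.
#     Template("export PORT=%port; echo $HOME").substitute({"port": "8080"})
#     # -> 'export PORT=8080; echo $HOME'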
class Log:
# We have the following agreement on verbosity levels:
# 0: equals to print_info
# 1: summary of a host level operation (a batch of command)
# 2: summary of a command
# 3: details or content of a command
verbosity = 0
@staticmethod
def _print(message):
print "%s %s" % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message)
@staticmethod
def error_exit(print_stack):
if not print_stack:
sys.exit(2)
else:
raise RuntimeError("fatal error")
@staticmethod
def print_verbose(message, verbosity):
if verbosity <= Log.verbosity:
Log.print_info(message)
@staticmethod
def print_info(message):
Log._print(message)
@staticmethod
def print_success(message):
Log._print("\033[0;32m%s\033[0m" % message)
@staticmethod
def print_warning(message):
Log._print("\033[0;33m%s\033[0m" % message)
@staticmethod
def print_error(message):
Log._print("\033[0;31m%s\033[0m" % message)
@staticmethod
def print_critical(message):
Log.print_error(message)
Log.error_exit(False)
def get_deploy_config():
return deploy_config.get_deploy_config()
def get_local_package_path_general(path, artifact, version):
'''
Get the local tarball path of the package of specified artifact and version
@param path the base path of the tarball
@param artifact the artifact of the package
@param version the version of the package
@return string the full path of the tarball
Note: This method is for internal use, users shouldn't call it directly.
Users who want to obtain the local package path should call
get_local_package_path().
'''
return ("%s/%s-%s.tar.gz" % (path, artifact, version))
def get_local_package_path(artifact, version):
'''
Get the local tarball path of the package of specified artifact and version
@param artifact the artifact of the package
@param version the version of the package
@return string the full path of the tarball
'''
if artifact == "zookeeper":
package_path = get_local_package_path_general(
get_deploy_config().get_zookeeper_package_dir(),
artifact, version)
elif artifact == "hadoop":
package_path = get_local_package_path_general(
get_deploy_config().get_hadoop_package_dir(),
artifact, version)
elif artifact == "hbase":
package_path = get_local_package_path_general(
get_deploy_config().get_hbase_package_dir(),
artifact, version)
elif artifact == "impala-shell" or artifact == "impala":
package_path = get_local_package_path_general(
get_deploy_config().get_imapala_package_dir(),
artifact, version)
else:
Log.print_critical("Unknow artifact: %s" % artifact)
return package_path
def generate_package_revision(root):
'''
Get the revision of the package. Currently, only svn revision is
supported. If the package directory is not a svn working directory,
a fake revision will be returned.
@param root the local package root directory
@return string the revision of the package
'''
if os.path.islink(root):
real_path = os.readlink(root)
if not real_path.startswith('/'):
abs_path = "%s/%s" % (os.path.dirname(root), real_path)
else:
abs_path = real_path
else:
abs_path = root
try:
cmd = ["svn", "info", abs_path]
env = os.environ
# Enforce English locale.
env["LC_ALL"] = "C"
revision_prefix = "Revision: "
content = subprocess.check_output(cmd, env=env)
for line in content.splitlines():
if line.startswith(revision_prefix):
return "r%s" % line[len(revision_prefix):]
except:
# We cannot get the version No., just return a fake one
return "r%s" % FAKE_SVN_VERSION
def generate_checksum(path):
'''
Generate the SHA-1 digest of specified file.
@param path the path of the file
@return string the SHA-1 digest
'''
fd = open(path, "r")
sha1 = hashlib.sha1()
while True:
buffer = fd.read(4096)
if not buffer: break
sha1.update(buffer)
fd.close()
return sha1.hexdigest()
def upload_package(args, artifact, version):
'''
Upload the specified package to the package server(Tank). Note that
if the file with the same checksum is already uploaded, this uploading
will be skipped.
@param args the command line arguments object parsed by argparse
@param artifact the artifact of the package
@param version the version of the package
@return dict the package information return by the package server
'''
package_path = get_local_package_path(artifact, version)
Log.print_info("Uploading pacakge: %s" % package_path)
revision = generate_package_revision(get_root_dir(args.service))
Log.print_success("Revision is: %s" % revision)
Log.print_info("Generating checksum of package: %s" % package_path)
checksum = generate_checksum(package_path)
Log.print_success("Checksum is: %s" % checksum)
tank_client = get_tank_client()
package_info = tank_client.check_package(artifact, checksum)
if not package_info:
if 200 == tank_client.upload(package_path, artifact, revision):
Log.print_success("Upload package %s success" % package_path)
package_info = tank_client.check_package(artifact, checksum)
return eval(package_info)
else:
Log.print_warning("Package %s has already uploaded, skip uploading" %
package_path)
return eval(package_info)
return None
def generate_site_xml(args, template_dict):
'''
Generate the *-site.xml file according to the given properties dict.
@param args the argument object parsed by argparse
@param template_dict the properties dict
@return string the generated file content
'''
template_path = "%s/site.xml.tmpl" % get_template_dir()
template = Template(open(template_path).read())
config_value = ""
keys = template_dict.keys()
keys.sort()
for key in keys:
config_value += """
<property>
<name>%s</name>
<value>%s</value>
</property>
""" % (key, template_dict[key])
return template.substitute({"config_value": config_value})
def create_run_script(template_path, template_dict):
'''
Generate the run script of given script template and variables dict.
@param template_path the script template path
@param template_dict the variables dict
@return string the generated file content
'''
template = Template(open(template_path).read())
content = template.safe_substitute(template_dict)
return content
def get_template_dir():
'''
Get the config templates directory.
'''
return '%s/template' % get_deploy_config().get_config_dir()
def get_config_dir():
'''
Get the service config directory.
'''
return get_deploy_config().get_config_dir()
def get_root_dir(service):
'''
Get the local root directory of specified service.
@param service the service name
@return string the local root directory of the service
'''
if service == "hdfs" or service == "yarn":
return get_deploy_config().get_hadoop_root()
if service == "hbase":
return get_deploy_config().get_hbase_root()
if service == "zookeeper":
return get_deploy_config().get_zookeeper_root()
if service == "impala":
return get_deploy_config().get_impala_root()
Log.print_critical("Unknow service: %s" % service)
def get_supervisor_client(host, service, cluster, job):
'''
A factory method to construct a supervisor client object.
@param host the remote server's host
@param service the service name
@param cluster the cluster name
@param job the job name
@return object the supervisor client object
'''
return service_config.get_supervisor_client(host, service, cluster, job)
def get_tank_client():
'''
A factory method to construct a tank(package server) client object.
'''
deploy_config = get_deploy_config()
tank_config = deploy_config.get_tank_config()
return TankClient(tank_config.get('server_host'),
tank_config.get('server_port'))
def get_service_config(args):
'''
Get service config, without any dependencies.
@param args the command line arguments object parsed by argparse
'''
service_config.get_short_user_name(args)
if not getattr(args, args.service + "_config", None):
setattr(args, args.service+"_config", ServiceConfig(args))
return getattr(args, args.service+"_config")
def generate_service_token(service, cluster):
'''
Generate a token used to bootstrap and cleanup.
@param service the service name
@param cluster the cluster name
@return string the generated token
'''
return str(uuid.uuid3(uuid.NAMESPACE_DNS,'%s-%s' % (
service, cluster)))
def check_input(input, yes='y'):
'''
Check if the input string is yes or not.
'''
return input.strip().lower() == yes.lower()
def check_admin_priviledge(args):
'''
Check if the current user is in the administrators list or not. Note that
this will be checked only when security is enabled.
'''
status, short_user_name = service_config.get_short_user_name_full()
args.short_user_name = short_user_name
if is_security_enabled(args):
if status:
admin_list = get_deploy_config().get_admin_list()
if short_user_name not in admin_list:
Log.print_critical("User %s is not an authorized administrator, "
"this operation can't be processed" % user)
else:
Log.print_critical('You must kinit your kerberos principal first')
def is_security_enabled(args):
'''
Determine if security is enabled or not.
'''
get_service_config(args)
if args.service == "zookeeper":
return len(args.zookeeper_config.configuration.generated_files["jaas-server.conf"]) != 0
elif args.service == "hdfs":
core_site_dict = args.hdfs_config.configuration.generated_files["core-site.xml"]
return (core_site_dict["hadoop.security.authentication"] == "kerberos") and (
core_site_dict["hadoop.security.authorization"] == "true")
elif args.service == "yarn":
core_site_dict = args.yarn_config.configuration.generated_files["core-site.xml"]
return (core_site_dict["hadoop.security.authentication"] == "kerberos") and (
core_site_dict["hadoop.security.authorization"] == "true")
elif args.service == "hbase":
hbase_site_dict = args.hbase_config.configuration.generated_files["hbase-site.xml"]
return (hbase_site_dict["hbase.security.authentication"] == "kerberos") and (
hbase_site_dict["hbase.security.authorization"] == "true")
elif args.service == "impala":
core_site_dict = args.impala_config.configuration.generated_files["core-site.xml"]
return (core_site_dict["hadoop.security.authentication"] == "kerberos") and (
core_site_dict["hadoop.security.authorization"] == "true")
else:
return False
def confirm_bootstrap(service, service_config):
'''
Let the users confirm bootstrap interactively. Users will be asked to
set a password, or a random password will be given. The password is
the verification token when users want to do cleanup.
'''
Log.print_warning("You should set a bootstrap password, " \
"it will be requried when you do cleanup")
password = <PASSWORD>()
input = raw_input("Set a password manually? (y/n) ")
if check_input(input):
input = getpass.getpass("Please input your password: ")
if len(input.strip()) >= 6:
password = input.strip()
else:
Log.print_critical("The length of the password is at least 6")
else:
Log.print_info("A random password will be generated")
password = generate_service_token(service, service_config.cluster.name)
Log.print_warning("Your password is: %s, you should store this " \
"in a safe place, because this is the verification code used " \
"to do cleanup" % password)
return password
def confirm_action(args, action):
'''
Let the users confirm the specify action interactively.
'''
Log.print_warning("You will %s the cluster \033[31;1m%s\033[0;33m, "
"do you really want to do this?" % (action, args.cluster))
token = generate_random_confirm_token()
input = raw_input("Please input \033[31;1m%s\033[0m to confirm: " % token)
if check_input(input, token):
Log.print_info("Begin to %s the cluster" % action)
else:
Log.print_critical("%s canceled" % action.capitalize())
def confirm_cleanup(args, service, service_config):
'''
Let the user confirm cleanup interactively. Users will be asked to input
the password set when the service is bootstrapped.
'''
confirm_action(args, 'cleanup')
input = getpass.getpass("Please input your installation password: ")
if len(input.strip()) >= 6:
return input.strip()
else:
Log.print_critical("The length of the password is at least 6")
def confirm_stop(args):
'''
Let the user confirm the stop action interactively.
'''
confirm_action(args, 'stop')
def confirm_start(args):
'''
Let the user confirm the start action interactively.
'''
confirm_action(args, 'start')
def confirm_restart(args):
'''
Let the user confirm the restart action interactively.
'''
confirm_action(args, 'restart')
def install_service(args, service, service_config, artifact):
'''
Install the specified service. Here installation means uploading the
service package to the package server(Tank).
@param args the command line arguments object
@param service the service name
@param service_config the service config object
@param artifact the artifact name
'''
Log.print_info("Installing %s to package server" % artifact)
package_info = upload_package(args, artifact, service_config.cluster.version)
if package_info:
Log.print_success("Install %s to package server success" % artifact)
pprint.pprint(package_info)
else:
Log.print_critical("Install %s to package server fail" % artifact)
def cleanup_job(service, service_config, host, job_name,
cleanup_token, cleanup_script=""):
'''
Clean up a task of the specified service and job. Note that cleanup
requires that the task must be stopped, so users should stop the task
before cleanup.
@param service the service name
@param service_config the service config object
@param host the host of the task
@param job_name the job name
@param cleanup_token the token used to verify cleanup
@param cleanup_script the user supplied cleanup script
@param artifact the artifact name
'''
Log.print_info("Cleaning up %s on %s" % (job_name, host))
supervisor_client = get_supervisor_client(host, service,
service_config.cluster.name, job_name)
message = supervisor_client.cleanup(cleanup_token, cleanup_script)
if SUPERVISOR_SUCCESS == message:
Log.print_success("Cleanup %s on %s success" % (job_name, host))
else:
Log.print_error("Cleanup %s on %s fail: %s" % (job_name, host, message))
def bootstrap_job(args, artifact, service, service_config, host, job_name,
cleanup_token, data_dir_indexes='0', bootstrap_script='', **config_files):
'''
Bootstrap a task of the specified service and job. Note that before
bootstrapping users should ensure that the data and log directories at
the server side are empty.
@param args the command line arguments object
@param artifact the artifact name
@param service the service name
@param service_config the service config object
@param host the host of the task
@param job_name the job name
@param cleanup_token the token used to verify cleanup
@param data_dir_indexes the data directory indexes
@param bootstrap_script the user supplied bootstrap script
@param config_files the config files dict
'''
Log.print_info("Bootstrapping %s on %s" % (job_name, host))
supervisor_client = get_supervisor_client(host, service,
service_config.cluster.name, job_name)
if (service_config.cluster.package_name and service_config.cluster.revision
and service_config.cluster.timestamp):
message = supervisor_client.bootstrap(artifact,
package_name=service_config.cluster.package_name,
revision=service_config.cluster.revision,
timestamp=service_config.cluster.timestamp,
cleanup_token=cleanup_token,
bootstrap_script=bootstrap_script,
data_dir_indexes=data_dir_indexes,
**config_files)
elif args.update_package:
message = supervisor_client.bootstrap(artifact, force_update=True,
cleanup_token=cleanup_token, bootstrap_script=bootstrap_script,
data_dir_indexes=data_dir_indexes, **config_files)
else:
message = supervisor_client.bootstrap(artifact,
package_name=args.package_name, revision=args.revision,
timestamp=args.timestamp, cleanup_token=cleanup_token,
bootstrap_script=bootstrap_script, data_dir_indexes=data_dir_indexes,
**config_files)
if SUPERVISOR_SUCCESS == message:
Log.print_success("Bootstrap %s on %s success" % (job_name, host))
else:
Log.print_critical("Bootstrap %s on %s fail: %s" % (job_name,
host, message))
def start_job(args, artifact, service, service_config, host, job_name,
start_script, http_url, **config_files):
'''
Start the task of specified service and job.
@param args the command line arguments object
@param artifact the artifact name
@param service the service name
@param service_config the service config object
@param host the host of the task
@param job_name the job name
@param start_script the user supplied start script
@param http_url the task's http entry url
@param config_files the config files dict
'''
Log.print_info("Starting %s on %s" % (job_name, host))
supervisor_client = get_supervisor_client(host, service,
service_config.cluster.name, job_name)
if not args.update_config:
config_files = dict()
if (service_config.cluster.package_name and service_config.cluster.revision
and service_config.cluster.timestamp):
message = supervisor_client.start(artifact,
package_name=service_config.cluster.package_name,
revision=service_config.cluster.revision,
timestamp=service_config.cluster.timestamp,
http_url=http_url, start_script=start_script,
**config_files)
elif args.update_package:
message = supervisor_client.start(artifact, force_update=True,
http_url=http_url, start_script=start_script, **config_files)
else:
message = supervisor_client.start(artifact, package_name=args.package_name,
revision=args.revision, timestamp=args.timestamp, http_url=http_url,
start_script=start_script, **config_files)
if SUPERVISOR_SUCCESS == message:
Log.print_success("Start %s on %s success" % (job_name, host))
else:
Log.print_error("Start %s on %s fail: %s" % (job_name, host, message))
def stop_job(service, service_config, host, job_name):
'''
Stop the task of specified service and job.
@param service the service name
@param service_config the service config object
@param host the host of the task
@param job_name the job name
'''
Log.print_info("Stopping %s on %s" % (job_name, host))
supervisor_client = get_supervisor_client(host, service,
service_config.cluster.name, job_name)
message = supervisor_client.stop()
if SUPERVISOR_SUCCESS == message:
Log.print_success("Stop %s on %s success" % (job_name, host))
else:
Log.print_error("Stop %s on %s fail: %s" % (job_name, host, message))
def show_job(service, service_config, host, job_name):
'''
Show the state of the task of the specified service and job.
@param service the service name
@param service_config the service config object
@param host the host of the task
@param job_name the job name
'''
Log.print_info("Showing %s on %s" % (job_name, host))
supervisor_client = get_supervisor_client(host, service,
service_config.cluster.name, job_name)
state = supervisor_client.show()
if state == 'RUNNING':
Log.print_success("%s on %s is %s" % (job_name, host, state))
else:
Log.print_error("%s on %s is %s" % (job_name, host, state))
def check_service(host, port):
'''
Check whether the given host:port is accessible or not.
'''
t = telnetlib.Telnet()
try:
t.open(host, port)
except:
return False
t.close()
return True
def check_job_stopped(service, cluster, job, host):
'''
Check whether a specified task is already stopped or not.
'''
supervisor_client = get_supervisor_client(host,
service, cluster, job)
status = supervisor_client.show()
return status in STOPPED_STATUS
def wait_for_job_stopping(service, cluster, job, host):
'''
Wait for a specified job to be stopped.
'''
while not check_job_stopped(service, cluster, job, host):
Log.print_warning("Wait for %s on %s stopping" % (job, host))
time.sleep(2)
def check_job_started(service, cluster, job, host):
'''
Check whether a specified task is already started or not.
'''
supervisor_client = get_supervisor_client(host,
service, cluster, job)
status = supervisor_client.show()
return status == 'RUNNING'
def wait_for_job_starting(service, cluster, job, host):
'''
Wait for a specified job to be started.
'''
# Wait 10 seconds to let supervisord start the task
time.sleep(10)
if not check_job_started(service, cluster, job, host):
Log.print_critical('%s on %s start failed' % (job, host))
def get_hadoop_package_root(version):
'''
Get the hadoop package root directory
'''
return "%s/hadoop-%s" % (get_deploy_config().get_hadoop_package_dir(), version)
def get_hbase_package_root(version):
'''
Get the hbase package root directory
'''
return "%s/hbase-%s/hbase-%s" % (get_deploy_config().get_hbase_package_dir(),
version, version)
def get_zookeeper_package_root(version):
'''
Get the zookeeper package root directory
'''
return "%s/zookeeper-%s" % (
get_deploy_config().get_zookeeper_package_dir(), version)
def parse_shell_command(args, command_dict):
'''
Parse the shell command and its options from the command line arguments.
'''
if len(args.command) == 0 or args.command[0] == 'help':
print_shell_help_info(command_dict)
return (None, None)
command = args.command[0]
command_info = command_dict.get(command)
if not command_info:
Log.print_warning(
"Can't find main class of '%s', suppose it's a class name" % command)
main_class = command
else:
main_class = command_info[0]
return (main_class, args.command[1:])
def print_shell_help_info(command_dict):
'''
Print the help information for the specified shell commands.
'''
help_info="help \tprint this help information"
for key, value in command_dict.iteritems():
help_info += "\n%-10s\t%s" % (key, value[1])
print help_info
def write_file(file_name, content):
'''
Write the specified content to the specified file.
'''
file = open(file_name, "wb")
file.write(content)
file.close()
def make_package_dir(args, artifact, version):
'''
Make the local package directories.
'''
cmd = ["mkdir", "-p", "%s/%s/" % (args.package_root, args.cluster)]
subprocess.check_call(cmd)
package_path = get_local_package_path(artifact, version)
cmd = ["tar", "-zxf", package_path, "-C", "%s/%s/" % (
args.package_root, args.cluster)]
subprocess.check_call(cmd)
def pack_package(args, artifact, version):
'''
Pack the package with generated configuration files into a tarball.
'''
cmd = ["tar", "-C", "%s/%s" % (args.package_root, args.cluster),
"-zchf", "%s/%s/%s-%s-%d.tar.gz" % (args.package_root,
args.cluster, artifact, version, time.time()),
"./%s-%s" % (artifact, version)]
subprocess.check_call(cmd)
def append_to_file(file, content):
'''
Append specified content to the specified file.
'''
fp = open(file, "a")
fp.write(content)
fp.close()
def confirm_rolling_update(id, wait_time):
'''
Let the user confirm the rolling update action interactively.
'''
while True:
if wait_time > 0:
Log.print_info("Waiting %d seconds before updating next task..."
% wait_time)
time.sleep(wait_time)
input = raw_input("Ready to update task %d? (y/n) " % id)
if check_input(input):
return True
return False
def get_zk_address(cluster):
'''
Get the zookeeper name address according to the cluster name.
'''
return "bj%s-zk-%s.hadoop.srv" % (cluster[0:2], cluster[2:])
def generate_random_confirm_token():
'''
Generate a random 8-byte token used for confirmation
'''
return str(uuid.uuid4())[0:8]
def get_task_by_hostname(hosts, hostnames):
tasks = []
for hostname in hostnames:
host_ip = socket.gethostbyname(hostname)
found_task = False
for id in hosts.iterkeys():
if hosts[id] == host_ip:
tasks.append(id)
found_task = True
break
# raise an error if the hostname does not map to any host of the cluster
if found_task == False:
raise ValueError(hostname + ' is not a valid host of cluster, please check your config')
return tasks
if __name__ == '__main__':
test()
|
StarcoderdataPython
|
154319
|
from tests.testutils.mocks.mock_paths import MockPaths
def test_app():
with MockPaths():
from tilescopegui.factory import TestingConfig, create_app
app = create_app(TestingConfig())
app.blueprints["home_blueprint"].template_folder = MockPaths._TMP.as_posix()
yield app
|
StarcoderdataPython
|
86418
|
<reponame>tirkarthi/odin-ai<gh_stars>0
from __future__ import absolute_import, division, print_function
import os
import pickle
import warnings
from collections import defaultdict
import matplotlib.mlab as mlab
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from scipy import stats
from tqdm import tqdm
from odin.stats import describe
from odin.utils import (ArgController, Progbar, UnitTimer, auto_logging,
batching, catch_warnings_ignore, get_script_path,
is_number, mpi, unique_labels)
from odin.visual import (Visualizer, generate_random_colors, merge_text_graph,
plot_confusion_matrix, plot_figure, plot_histogram,
plot_histogram_layers, plot_save, print_dist)
os.environ['ODIN'] = 'gpu,float32,seed=5218'
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=ImportWarning)
from sklearn.mixture import GaussianMixture
from sklearn.neighbors import KernelDensity
from sklearn.base import DensityMixin, BaseEstimator
# ===========================================================================
# Helpers
# ===========================================================================
def _clipping_quartile(x, alpha=1.5, test_mode=False):
# a wider clipping range keeps more data points,
# at the cost of a lower LLK
x = x.astype('float32')
Q1 = np.percentile(x, q=25)
Q3 = np.percentile(x, q=75)
IQR = Q3 - Q1
high = Q3 + alpha * IQR
low = Q1 - alpha * IQR
if test_mode:
x[x < low] = low
x[x > high] = high
else:
x = x[np.logical_and(low <= x, x <= high)]
return x
def _log_norm(x, scale_factor=10000):
x = x.astype('float32')
x_sum = np.sum(x)
return np.log1p(x / (x_sum + np.finfo(x.dtype).eps) * scale_factor)
def _draw_hist(x, ax, title, n_bins, show_yticks=True):
count, bins = plot_histogram(x=x,
bins=n_bins,
ax=ax,
normalize=False,
kde=False,
range_0_1=False,
covariance_factor=0.25,
centerlize=False,
fontsize=8,
title=title)
plt.xlim((np.min(x), np.max(x)))
plt.xticks(np.linspace(start=np.min(x),
stop=np.max(x),
num=5,
dtype='float32'),
fontsize=6)
if show_yticks:
plt.yticks(np.linspace(start=np.min(count),
stop=np.max(count),
num=5,
dtype='int32'),
fontsize=5)
else:
plt.yticks([], [])
return count, bins
# ===========================================================================
# LabelThresholding class
# ===========================================================================
class ProbabilisticEmbedding(BaseEstimator, DensityMixin, Visualizer):
r""" Probabilistic embedding of real values vectors using
Gaussian Mixture Model
Arguments:
n_components_per_class : int
number of GMM components for thresholding (default: 2)
positive_component : int
in case 3 or more components are used, this is the index
of the component from where it is positive during thresholding
(the components are sorted by increasing mean order)
log_norm : bool
clip_quartile : float
ci_threshold : float
random_state: int
verbose: bool
"""
def __init__(self,
n_components_per_class=2,
positive_component=1,
log_norm=True,
clip_quartile=0.,
remove_zeros=True,
ci_threshold=-0.68,
random_state=1,
verbose=False):
super(ProbabilisticEmbedding, self).__init__()
self.n_components_per_class = int(n_components_per_class)
self.positive_component = int(positive_component)
assert self.positive_component > 0
self.remove_zeros = bool(remove_zeros)
self.log_norm = bool(log_norm)
self.clip_quartile = float(clip_quartile)
ci_threshold = float(ci_threshold)
assert 0 <= np.abs(ci_threshold) <= 1
self.ci_threshold = ci_threshold
self.verbose = bool(verbose)
self.random_state = random_state
self._models = []
# ******************** properties ******************** #
@property
def n_classes(self):
return len(self._models)
@property
def means(self):
""" Components' means for all classes
Returns
-------
array
means array (n_components, n_classes)
"""
return np.hstack([
gmm.means_.ravel()[order][:, np.newaxis] for order, gmm in self._models
])
@property
def precisions(self):
""" Components' precision for all classes
Returns
-------
array
precisions array (n_components, n_classes)
"""
return np.hstack([
gmm.precisions_.ravel()[order][:, np.newaxis]
for order, gmm in self._models
])
# ******************** main ******************** #
def normalize(self, x, test_mode=False):
if x.ndim > 1:
x = x.ravel()
assert np.all(x >= 0), "Only support non-negative values"
if self.remove_zeros and not test_mode:
x = x[x > 0]
if self.clip_quartile > 0:
x = _clipping_quartile(x, alpha=self.clip_quartile, test_mode=test_mode)
if self.log_norm:
x = _log_norm(x)
return x
def fit(self, X):
assert X.ndim == 2, "Only support input matrix but given: %s" % str(X.shape)
n_classes = X.shape[1]
it = tqdm(list(range(n_classes))) if self.verbose else range(n_classes)
for i in it:
# ====== normalizing ====== #
x_train = self.normalize(X[:, i], test_mode=False)
# ====== GMM ====== #
gmm = GaussianMixture(n_components=self.n_components_per_class,
covariance_type='diag',
init_params='kmeans',
n_init=8,
max_iter=120,
random_state=self.random_state)
gmm.fit(x_train[:, np.newaxis])
# ====== save GMM ====== #
means_ = gmm.means_.ravel()
order = np.argsort(means_)
self._models.append((order, gmm))
def fit_transform(self, X, return_probabilities=True):
self.fit(X)
return self.predict_proba(X) if return_probabilities else self.predict(X)
def _predict(self, X, threshold):
assert X.shape[1] == self.n_classes, "Number of classes mis-match"
y = []
for i, (order, gmm) in enumerate(self._models):
x_test = self.normalize(X[:, i], test_mode=True)
# binary thresholding
if threshold is not None:
ci = stats.norm.interval(
np.abs(threshold),
loc=gmm.means_[order[self.positive_component]],
scale=np.sqrt(1 / gmm.precisions_[order[self.positive_component]]))
x_out = (x_test >=
(ci[0] if threshold < 0 else ci[1])).astype('float32')
x_out = x_out[:, np.newaxis]
# probabilizing
else:
probas = gmm.predict_proba(
x_test[:, np.newaxis]).T[order][self.positive_component:]
probas = np.mean(probas, axis=0)
x_out = probas[:, np.newaxis]
y.append(x_out)
return np.concatenate(y, axis=1)
def predict(self, X):
return self._predict(X, threshold=self.ci_threshold)
def predict_proba(self, X):
return self._predict(X, threshold=None)
def score_samples(self, X):
r"""Compute the weighted log probabilities for each sample.
Arguments:
X : array-like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns:
log_prob : array, shape (n_samples,)
Log probabilities of each data point in X.
"""
scores = []
for x, (order, gmm) in zip(X.T, self._models):
x = self.normalize(x, test_mode=True)
s = gmm.score_samples(x[:, np.newaxis])[:, np.newaxis]
scores.append(s)
return np.mean(np.hstack(scores), axis=1)
def score(self, X, y=None):
r"""Compute the per-sample average log-likelihood of the given data X.
Arguments:
X : array-like, shape (n_samples, n_dimensions)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns:
log_likelihood : float
Log likelihood of the Gaussian mixture given X.
"""
return self.score_samples(X).mean()
# ******************** methods for diagnosing ******************** #
def _check_input(self, X, labels):
assert X.ndim == 2, \
"Only support input matrix but given: %s" % str(X.shape)
n_classes = X.shape[1]
assert n_classes == self.n_classes, \
"Fitted with %d classes but give %d classes" % (self.n_classes, n_classes)
if labels is None:
labels = ['#%d' % i for i in range(n_classes)]
assert len(labels) == n_classes, \
"Number of classes and labels mismatch"
return X, labels, n_classes
def plot_diagnosis(self, X, labels=None, n_bins=200):
X, labels, n_classes = self._check_input(X, labels)
nrow = n_classes
ncol = 1
fig = plot_figure(nrow=nrow * 2, ncol=8)
# add 1 for threshold color
# add 1 for PDF color
colors = sns.color_palette(n_colors=self.n_components_per_class + 2)
for i, (name, (order, gmm)) in enumerate(zip(labels, self._models)):
start = ncol * i
means_ = gmm.means_.ravel()[order]
precision_ = gmm.precisions_.ravel()[order]
x = self.normalize(X[:, i], test_mode=False)
# ====== scores ====== #
# score
score_llk = gmm.score(x[:, np.newaxis])
score_bic = gmm.bic(x[:, np.newaxis])
score_aic = gmm.aic(x[:, np.newaxis])
# ====== the histogram ====== #
ax = plt.subplot(nrow, ncol, start + 1)
count, bins = _draw_hist(x,
ax=ax,
title="[%s] LLK:%.2f BIC:%.2f AIC:%.2f" %
(name, score_llk, score_bic, score_aic),
n_bins=n_bins,
show_yticks=True)
# ====== draw GMM PDF ====== #
y_ = np.exp(gmm.score_samples(bins[:, np.newaxis]))
y_ = (y_ - np.min(y_)) / (np.max(y_) - np.min(y_)) * np.max(count)
ax.plot(bins, y_, color='red', linestyle='-', linewidth=1.5, alpha=0.6)
# ====== draw the threshold ====== #
ci = stats.norm.interval(
np.abs(self.ci_threshold),
loc=gmm.means_[order[self.positive_component]],
scale=np.sqrt(1 / gmm.precisions_[order[self.positive_component]]))
threshold = ci[0] if self.ci_threshold < 0 else ci[1]
ids = np.where(bins >= threshold, True, False)
ax.fill_between(bins[ids],
y1=0,
y2=np.max(count),
facecolor=colors[-2],
alpha=0.3)
ax.text(np.min(bins[ids]), np.min(count), "%.2f" % threshold)
# ====== plot GMM probability ====== #
x_ = np.linspace(np.min(bins), np.max(bins), 1200)
y_ = gmm.predict_proba(x_[:, np.newaxis]) * np.max(count)
for c, j in zip(colors, y_.T):
plt.plot(x_, j, color=c, linestyle='--', linewidth=1.8, alpha=0.6)
# ====== draw the each Gaussian bell ====== #
ax = ax.twinx()
_x = np.linspace(start=np.min(x), stop=np.max(x), num=800)
for c, m, p in zip(colors, means_, precision_):
with catch_warnings_ignore(Warning):
j = mlab.normpdf(_x, m, np.sqrt(1 / p))
ax.plot(_x, j, color=c, linestyle='-', linewidth=1)
ax.scatter(_x[np.argmax(j)],
np.max(j),
s=66,
alpha=0.8,
linewidth=0,
color=c)
ax.yaxis.set_ticklabels([])
fig.tight_layout()
self.add_figure('diagnosis', fig)
return self
def plot_distribution(self, X, labels=None):
X, labels, n_classes = self._check_input(X, labels)
X_bin = self.predict(X)
X_prob = self.predict_proba(X)
normalize_to_01 = lambda x: x / np.sum(x)
dist_raw = normalize_to_01(np.sum(X, axis=0))
dist_bin = normalize_to_01(np.sum(X_bin, axis=0))
dist_prob = normalize_to_01(np.sum(X_prob, axis=0))
x = np.arange(n_classes)
fig = plot_figure(nrow=3, ncol=int(n_classes * 1.2))
ax = plt.gca()
colors = sns.color_palette(n_colors=3)
bar1 = ax.bar(x, dist_raw, width=0.2, color=colors[0], alpha=0.8)
bar2 = ax.bar(x + 0.2, dist_bin, width=0.2, color=colors[1], alpha=0.8)
bar3 = ax.bar(x + 0.4, dist_prob, width=0.2, color=colors[2], alpha=0.8)
ax.set_xticks(x + 0.2)
ax.set_xticklabels(labels, rotation=-10)
ax.legend([bar1, bar2, bar3], ['Original', 'Binarized', 'Probabilized'])
ax.grid(True, axis='y')
ax.set_axisbelow(True)
self.add_figure('distribution', fig)
return self
def boxplot(self, X, labels=None):
X, labels, n_classes = self._check_input(X, labels)
nrow = n_classes
ncol = 3
fig = plot_figure(nrow=3 * nrow, ncol=int(1.5 * ncol))
for i, (x, name) in enumerate(zip(X.T, labels)):
start = i * ncol
ax = plt.subplot(nrow, ncol, start + 1)
ax.boxplot(x,
whis=1.5,
labels=['Original'],
flierprops={
'marker': '.',
'markersize': 8
},
showmeans=True,
meanline=True)
ax.set_ylabel(name)
ax = plt.subplot(nrow, ncol, start + 2)
ax.boxplot(x[x > 0],
whis=1.5,
labels=['NonZeros'],
flierprops={
'marker': '.',
'markersize': 8
},
showmeans=True,
meanline=True)
ax = plt.subplot(nrow, ncol, start + 3)
ax.boxplot(self.normalize(x, test_mode=False),
whis=1.5,
labels=['Normalized'],
flierprops={
'marker': '.',
'markersize': 8
},
showmeans=True,
meanline=True)
plt.tight_layout()
self.add_figure('boxplot', fig)
return self
|
StarcoderdataPython
|
1680380
|
<reponame>zaehuun/osscap2020
from pytet import *
NowRoad=[10,10,10,10,10,10,10,10,0,0,0,0,0,0,0,0,10,10,10,10,10,10,10,10]
NList=[4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]
# All directions are relative to the LED Matrix output (left and right are mirrored)
def NR():
return NowRoad
def R1(): # shift the entire road one cell to the right
    if NowRoad[5]==0: # do nothing if the road already touches the wall
return NowRoad
for i in NList:
if NowRoad[i]==0:
NowRoad[i-1]=0
break
for i in reversed(NList):
if NowRoad[i]==0:
NowRoad[i]=10
break
return NowRoad
def L1(): # shift the entire road one cell to the left
    if NowRoad[18]==0: # do nothing if the road already touches the wall
return NowRoad
for i in NList:
if NowRoad[i]==0:
NowRoad[i]=10
break
for i in reversed(NList):
if NowRoad[i]==0:
NowRoad[i+1]=0
break
return NowRoad
def LNar1(): # narrow the road by one cell from the left
    Nl=NowRoad[4:20]
    f1=Nl.index(0)
    e1=Nl[f1+1:].index(10)+f1+1
    if (e1-f1)<=5: # do nothing if the road gap is already at its minimum width
return NowRoad
for i in reversed(NList):
if NowRoad[i]==0:
NowRoad[i]=10
return NowRoad
def RNar1(): # narrow the road by one cell from the right
    Nl=NowRoad[4:20]
    f1=Nl.index(0)
    e1=Nl[f1+1:].index(10)+f1+1
    if (e1-f1)<=5: # do nothing if the road gap is already at its minimum width
return NowRoad
for i in NList:
if NowRoad[i]==0:
NowRoad[i]=10
return NowRoad
def RWid1(): # widen the road by one cell to the right
    if NowRoad[5]==0: # do nothing if the road already touches the wall
return NowRoad
for i in NList:
if NowRoad[i]==0:
NowRoad[i-1]=0
return NowRoad
def LWid1(): # widen the road by one cell to the left
    if NowRoad[18]==0: # do nothing if the road already touches the wall
return NowRoad
for i in reversed(NList):
if NowRoad[i]==0:
NowRoad[i+1]=0
return NowRoad
def RptRoad(CR,n): # apply a single road function n times
for i in range(n):
road.append(copy.deepcopy(CR()))
# TODO: switch the output function used here
def DRptRoad(CR1,CR2,n): # alternate between two road functions, n times each
for i in range(n):
#road.append()
road.append(copy.deepcopy(CR1()))
road.append(copy.deepcopy(CR2()))
# LookGood(NowRoad)
'''
def LookGood(NowRoad): # helper to print the road in a readable form on screen
a=[]
for i in range(24):
if NowRoad[i]==10:
a.append("■")
else:
a.append("□")
print(a)
'''
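# Hedged usage sketch (not part of the original file): `road` and `copy` are assumed to
# come from `from pytet import *`, since RptRoad/DRptRoad rely on them. A course could be
# assembled by chaining the transforms above, e.g.:
#
#     RptRoad(NR, 10)         # 10 rows of the unchanged road
#     DRptRoad(R1, NR, 5)     # drift right: shift, hold, repeated 5 times
#     RptRoad(LNar1, 3)       # narrow from the left for 3 rows
#     DRptRoad(LWid1, L1, 4)  # widen to the left while sliding left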
|
StarcoderdataPython
|
75741
|
<filename>phishing/setmail.py
#!/usr/bin/env python
# Copyright (c) 2012, AverageSecurityGuy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of AverageSecurityGuy nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
import smtpd
import smtplib
import asyncore
import dns.resolver
port = 2525
debug = False
def get_mx_record(domain):
records = dns.resolver.query(domain, 'MX')
return str(records[0].exchange)
class CustomSMTPServer(smtpd.SMTPServer):
def process_message(self, peer, mailfrom, rcpttos, data):
for rcptto in rcpttos:
print '[*] Sending message to {0}.'.format(rcptto)
domain = rcptto.split('@')[1]
mx = get_mx_record(domain)
try:
server = smtplib.SMTP(mx, 25)
if debug:
server.set_debuglevel(True)
server.sendmail(mailfrom, rcptto, data)
except smtplib.SMTPDataError as e:
print '[-] {0}'.format(str(e[1]))
except smtplib.SMTPServerDisconnected as e:
print '[-] {0}'.format(str(e))
except smtplib.SMTPConnectError as e:
print '[-] {0}'.format(str(e[1]))
server = CustomSMTPServer(('127.0.0.1', port), None)
print '[+] Server listening on port {0}'.format(port)
asyncore.loop()
|
StarcoderdataPython
|
3212602
|
from numpy import eye
def warmUpExercise():
""" an example function that returns the 5x5 identity matrix
"""
return eye(5)
|
StarcoderdataPython
|
52837
|
<reponame>JiangYangJie/Embedded<filename>esp8266/clock/printf.py
from dictionary import dicts
class Printf:
def __init__(self,oled):
self.oled=oled
self.clear()
    def clear(self):# clear the display
self.oled.fill(0)
self.oled.show()
    def en(self, String, x, y):# draw English text; oled: display object, String: text to show, x, y: screen coordinates
self.oled.text(String, x, y)
self.oled.show()
    def printf(self,char,x_axis,y_axis,line=30):# line: glyph height in pixel rows
offset_ = 0
a=['','','','','']
        for k in char:# Chinese glyphs use a different number of hex columns than digits/ASCII
if k in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',' ',':']:
code = (hex(ord(k))[2:] + " ").replace(' ', '')
byte_data = dicts[code]
else:
                code = 0x00 # build the hex lookup code for a Chinese character
data_code = k.encode("utf-8")
code |= data_code[0] << 16
code |= data_code[1] << 8
code |= data_code[2]
byte_data = dicts[code]
            for y in range(0, line): # iterate over the glyph rows (y axis)
                for i in range(0,int((len(byte_data)/line))):# number of byte columns along the x axis
                    a[i] = bin(byte_data[y+i*line]).replace('0b', '')# convert the byte to a binary string
                    while len(a[i]) < 8: # left-pad to 8 bits (x axis)
a[i] = '0' + a[i]
                    for x in range(0, 8):# paint one 8-pixel block
                        # pass
                        self.oled.pixel(x_axis + offset_ + x+i*8, y + y_axis, int(a[i][x])) # write each of the 8 pixels
if k in [':','.']:
                offset_ += 6 # narrow advance for ':' and '.' to keep the spacing tight
else:
offset_ += line
self.oled.show()
#
# p1=Printf('oled')
# p1.printf('123 ',1,1)
|
StarcoderdataPython
|
56687
|
<gh_stars>1-10
import cv2
import os
def detect_faces_from_webcam(webcam_index=0, window_title='Faces In Video', cascade='haarcascade_frontalface_default.xml',box_colour=(0, 255, 0), line_thickness=2):
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + cascade)
cap = cv2.VideoCapture(webcam_index)
while True:
_, img = cap.read()
grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY, 1)
faces = face_cascade.detectMultiScale(grey, 1.1, 4)
for (x, y, w, h) in faces:
cv2.rectangle(img, (x,y), (x+w, y+h), box_colour, line_thickness)
cv2.imshow(window_title, img)
k = cv2.waitKey(1)
        if k == 27:
            break
    cap.release()
    cv2.destroyAllWindows()
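
# Hedged usage sketch (not part of the original file): run the detector on the default
# webcam; press Esc to stop. The webcam index and window title below are illustrative.
if __name__ == "__main__":
    detect_faces_from_webcam(webcam_index=0, window_title="Faces In Video")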
|
StarcoderdataPython
|
1761077
|
__all__ = ("IntegerMetaMap", "ScalarMetaMap", "MSpec")
import typing
from enum import IntEnum
from functools import lru_cache
from collections import OrderedDict
from warnings import warn
from .Spec import *
from .ArraySpec import ArraySpec
from .SpecNoIntegers import IntegerMetaMap
from .SpecNoScalars import ScalarMetaMap
def specClassNameGenerator(*, isArray: bool = False, integerMode=None, scalarMode=None, isDummy: bool = False):
name = []
if isDummy:
name.append("Dummy")
if isArray:
name.append("Array")
name.append("Spec")
if scalarMode:
name.append("NoScalars" + ScalarMetaMap._classToNamePartMapping[scalarMode])
if integerMode:
name.append(IntegerMetaMap._classToNamePartMapping[integerMode] + "Integers")
return "".join(name)
import re
nameRx = re.compile("^(Dummy)?(Array)?Spec(?:NoScalars(Dumb|Categorical))?(?:(To|No)Integers)?$")
def parseName(name: str) -> typing.Mapping[str, typing.Any]:
names = ("isDummy", "isArray", "scalarMode", "integerMode")
res = dict(zip(names, nameRx.match(name).groups()))
res["isDummy"] = bool(res["isDummy"])
res["isArray"] = bool(res["isArray"])
res["scalarMode"] = ScalarMetaMap._namePartToClassMapping[res["scalarMode"]]
res["integerMode"] = IntegerMetaMap._namePartToClassMapping[res["integerMode"]]
return res
@lru_cache(maxsize=None, typed=True)
def MSpec(*name: typing.Optional[typing.Tuple[str]], isArray: bool = False, integerMode=None, scalarMode=None, isDummy: bool = False, **kwargs):
"""A class choosing the right sequence of inheritance of mixins depending on traits the spec class must have"""
if name:
assert len(name) == 1
assert isinstance(name[0], str)
return MSpec(**parseName(name[0]), name=name[0])
else:
superclasses = []
if isDummy:
superclasses.append(DummySpec)
if isArray:
superclasses.append(ArraySpec)
if integerMode:
superclasses.append(integerMode)
if scalarMode:
superclasses.append(scalarMode)
if not superclasses:
superclasses.append(Spec)
if len(superclasses) == 1:
#warn("Use " + superclasses[0].__name__ + " directly")
return superclasses[0]
if "name" in kwargs:
name = kwargs["name"]
else:
name = specClassNameGenerator(isArray=isArray, integerMode=integerMode, scalarMode=scalarMode, isDummy=isDummy)
return type(name, tuple(superclasses), {})
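
# Hedged usage sketch (not part of the original module): MSpec can be called either with a
# generated class name or with explicit trait keyword arguments, and lru_cache guarantees
# the same class object comes back for identical traits. The printed name below follows the
# scheme encoded in specClassNameGenerator/nameRx.
if __name__ == "__main__":
    ArrayOnly = MSpec(isArray=True)           # a single trait resolves to ArraySpec itself
    assert ArrayOnly is MSpec(isArray=True)   # cached, so the identical class is returned
    print(specClassNameGenerator(isArray=True, isDummy=True))  # -> "DummyArraySpec"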
|
StarcoderdataPython
|
173467
|
<gh_stars>1-10
"""**DEPRECATED** - Instead, use `@rules_rust//crate_universe:repositories.bzl"""
load(":repositories.bzl", "crate_universe_dependencies")
def crate_deps_repository(**kwargs):
# buildifier: disable=print
print("`crate_deps_repository` is deprecated. See setup instructions for how to update: https://bazelbuild.github.io/rules_rust/crate_universe.html#setup")
crate_universe_dependencies(**kwargs)
|
StarcoderdataPython
|
1750209
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from openstack import exceptions
from openstack.orchestration.v1 import stack
from openstack.tests.functional import base
from openstack.tests.functional.network.v2 import test_network
@unittest.skip("bug/1525005")
@unittest.skipUnless(base.service_exists(service_type='orchestration'),
'Orchestration service does not exist')
class TestStack(base.BaseFunctionalTest):
NAME = 'test_stack'
stack = None
network = None
subnet = None
cidr = '10.99.99.0/16'
@classmethod
def setUpClass(cls):
super(TestStack, cls).setUpClass()
if cls.conn.compute.find_keypair(cls.NAME) is None:
cls.conn.compute.create_keypair(name=cls.NAME)
image = next(cls.conn.image.images())
tname = "openstack/tests/functional/orchestration/v1/hello_world.yaml"
with open(tname) as f:
template = f.read()
cls.network, cls.subnet = test_network.create_network(cls.conn,
cls.NAME,
cls.cidr)
parameters = {
'image': image.id,
'key_name': cls.NAME,
'network': cls.network.id,
}
sot = cls.conn.orchestration.create_stack(
name=cls.NAME,
parameters=parameters,
template=template,
)
assert isinstance(sot, stack.Stack)
cls.assertIs(True, (sot.id is not None))
cls.stack = sot
cls.assertIs(cls.NAME, sot.name)
cls.conn.orchestration.wait_for_status(
sot, status='CREATE_COMPLETE', failures=['CREATE_FAILED'])
@classmethod
def tearDownClass(cls):
super(TestStack, cls).tearDownClass()
cls.conn.orchestration.delete_stack(cls.stack, ignore_missing=False)
cls.conn.compute.delete_keypair(cls.NAME)
# Need to wait for the stack to go away before network delete
try:
cls.conn.orchestration.wait_for_status(
cls.stack, 'DELETE_COMPLETE')
except exceptions.NotFoundException:
pass
cls.linger_for_delete()
test_network.delete_network(cls.conn, cls.network, cls.subnet)
def test_list(self):
names = [o.name for o in self.conn.orchestration.stacks()]
self.assertIn(self.NAME, names)
|
StarcoderdataPython
|
1726921
|
# -*- coding: utf-8 -*-
from setuptools import setup
try:
from pip._internal.req import parse_requirements
except:
from pip.req import parse_requirements
install_reqs = list(parse_requirements("requirements.txt", session={}))
def version():
from stubilous import version
return version.get_version()
setup(name="stubilous",
version=version(),
description="A plain simple Python http stub server",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/CodersOfTheNight/stubilous",
license="MIT",
packages=["stubilous"],
setup_requires=[str(ir.req) for ir in install_reqs] + ["pytest-runner"],
test_suite="pytest",
tests_require=["pytest", "requests"])
|
StarcoderdataPython
|
4832338
|
import ast
import json
import os
import unicodedata
import base64
from amazon.api import AmazonAPI
import logging
import base64
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def elicit_intent(message, responsecards):
return {
'dialogAction' : {
'type' : 'ElicitIntent',
'message' : message,
'responseCard' : {
'version' : 1,
'contentType' : 'application/vnd.amazonaws.card.generic',
'genericAttachments' : responsecards
}
}
}
def elicit(message):
return {
'dialogAction' : {
'type' : 'ElicitIntent',
'message' : message,
'responseCard' : {
'version' : 1,
'contentType' : 'application/vnd.amazonaws.card.generic',
'genericAttachments' : [
{
'title' : 'These are the trending technologies (as of TIOBE index)',
'buttons' : [
{
'text' : 'Java',
'value' : 'show java book'
},
{
'text' : 'C',
'value' : 'show C book'
},
{
'text' : 'Python',
'value' : 'show python book'
}
]
}
]
}
}
}
def create_response_cards(books_list, books_url, books_image):
length = books_list.__len__()
list = []
for incrementor in range(length):
template = {}
template['title'] = books_list[incrementor]
template['attachmentLinkUrl'] = books_url[incrementor]
template['imageUrl'] = books_image[incrementor]
list.append(template)
return list
def lambda_handler(event, context):
logger.debug('input to lambda: {}'.format(event) )
try:
session_attributes = event['sessionAttributes']['data']
decoded = base64.b64decode(session_attributes)
data = json.loads(decoded)
title = data[0]['title']
# AWSACCESSKEYID, AWSSECRETKEY, ASSOCIATETAG are declared as environment varialbes while creating the Lambda function
AWSAccessKeyId = os.environ.get("AWSACCESSKEYID")
AWSSecretKey = os.environ.get("AWSSECRETKEY")
associateTag = os.environ.get("ASSOCIATETAG")
api = AmazonAPI(AWSAccessKeyId , AWSSecretKey , associateTag)
decoded = base64.b64decode(session_attributes)
data = json.loads(decoded)
print data[0]
book_authors = []
book_authors = str(data[0]['authors']).split(",")
print 'authors after split:', book_authors
length = book_authors.__len__()
books_list = range(length)
books_url = []
books_image = []
for incrementor in range(length):
book_titles = ""
book_url = ""
book_image_url = ""
number = 0
print 'Author: ', book_authors[incrementor]
books = api.search(Author = book_authors[incrementor], SearchIndex = 'Books', salesrank = 'Bestselling')
for _, book in enumerate(books):
if number == 0:
book_titles = book.title[:78] + '..'
if book_titles == title:
continue
else:
#book_titles = book.title
book_url = book.detail_page_url.split('?',1)[0]
book_image_url = book.large_image_url
number += 1
else:
break
print 'other books by {} are {}'.format(book_authors[incrementor], book_titles)
books_list[incrementor] = book_titles
books_url.append(book_url)
books_image.append(book_image_url)
print 'titles: ', books_list
print 'urls: ', books_url
print 'images: ', books_image
content = ""
for len in range(books_list.__len__()):
content = content + " " + str(books_list[len])
message = {'contentType': 'PlainText', 'content': """The author(s) for {} is {}. Other books by the same author(s): {}""".format(data[0]['title'], data[0]['authors'], content)}
print books_list
output = elicit_intent(message, create_response_cards(books_list, books_url, books_image))
logger.debug('output to lex: {}'.format(output))
return output
except:
message = {'contentType': 'PlainText', 'content': """:-O Oops I forgot what we were talking about. How about these:"""}
return elicit(message)
|
StarcoderdataPython
|
1661681
|
<gh_stars>0
from django.db import models
class Wine(models.Model):
name=models.CharField(max_length=255)
description=models.TextField()
points=models.IntegerField(blank=False, default=0)
price=models.DecimalField(blank=False, default=0, max_digits=999, decimal_places=2)
ratio=models.DecimalField(blank=False, default=0, max_digits=999, decimal_places=2)
|
StarcoderdataPython
|
1657855
|
<filename>src/server/app/main/models/ChallengesModel.py
from .. import db
import datetime
from . import ChallengesTagsModel
class ChallengesModel(db.Model):
"""
[summary]
Args:
ChallengesMixin ([type]): [description]
db ([type]): [description]
"""
__tablename__ = "challenges"
id = db.Column(db.Integer, primary_key=True)
challenge_name = db.Column(db.String(80), nullable=False)
description = db.Column(db.Text, nullable=False)
problem_statement = db.Column(db.Text, nullable=False)
input_format = db.Column(db.Text, nullable=False)
constraints = db.Column(db.Text, nullable=True)
output_format = db.Column(db.Text, nullable=False)
difficulty = db.Column(db.String(20), nullable=False)
sample_input = db.Column(db.Text, nullable=False)
sample_output = db.Column(db.Text, nullable=False)
created_at = db.Column(db.DateTime(timezone=False),
nullable=False, default=datetime.datetime.now())
challenge_settings = db.relationship(
'ChallengeSettings', backref='challenge', lazy=True)
tags = db.relationship('TagsModel', secondary=ChallengesTagsModel.challenges_tags, lazy='subquery',
backref=db.backref('challenges', lazy=True))
test_cases = db.relationship(
'TestCasesModel', backref='challenge', lazy=True)
max_score = db.Column(db.Integer, nullable=True, default=0)
owner = db.Column(db.String(20), nullable=True)
|
StarcoderdataPython
|
109754
|
<filename>tests/pyxb/test-reserved.py
import unittest
import pyxb.binding.basis
class TestReserved (unittest.TestCase):
def testSTD (self):
tSTD = pyxb.binding.basis.simpleTypeDefinition
for k in tSTD.__dict__.keys():
if not k.startswith('_'):
self.assertTrue(k in tSTD._ReservedSymbols, k)
def testCTD (self):
tCTD = pyxb.binding.basis.complexTypeDefinition
for k in tCTD.__dict__.keys():
if not k.startswith('_'):
self.assertTrue(k in tCTD._ReservedSymbols, k)
if '__main__' == __name__:
unittest.main()
|
StarcoderdataPython
|
147507
|
<filename>model/constants.py<gh_stars>1-10
'''
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
MODEL_NAME = "HBLM-USA 1.0"
samples = 100
response_variable = "LOG_SITE_ENERGY_kWh_yr"
predictor_variables = ["LOG_THERMAL_ENERGY_kWh_yr", "CLUSTER_LOG_SITE_EUI_kWh_m2yr"]
random_state = 170
n_clusters = 5
storey_height_m = 3
air_density_kgm3 = 1.202
hours_of_day = 24
COP_cooling = 3.3
COP_heating = 3.0
RH_base_cooling_perc = 60
RH_base_heating_perc = 30
T_base_cooling_C = 18.5
T_base_heating_C = 18.5
ACH_Commercial = 6.0
ACH_Residential = 4.0
ZONE_NAMES = {"Hot-humid": ["1A", "2A", "3A"],
"Hot-dry": ["2B", "3B"],
"Hot-marine": ["3C"],
"Mixed-humid": ["4A"],
"Mixed-dry": ["4B"],
"Mixed-marine": ["4C"],
"Cold-humid": ["5A", "6A"],
"Cold-dry": ["5B", "6B", "7"]}
|
StarcoderdataPython
|
187828
|
<filename>kthSmallestElementBST.py
class Solution(object):
def kthSmallest(self, root, k):
"""
:type root: TreeNode
:type k: int
:rtype: int
"""
stack = [root]
BST_vals = []
while stack:
curr = stack.pop()
BST_vals.append(curr.val)
if curr.left != None:
stack.append(curr.left)
if curr.right != None:
stack.append(curr.right)
BST_vals.sort()
return BST_vals[k-1]
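
# Hedged usage sketch (not part of the original file): LeetCode normally supplies TreeNode,
# so a minimal stand-in is defined here purely to exercise the solution.
class TreeNode(object):
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

if __name__ == "__main__":
    #     3
    #    / \
    #   1   4
    #    \
    #     2
    root = TreeNode(3, TreeNode(1, right=TreeNode(2)), TreeNode(4))
    print(Solution().kthSmallest(root, 1))  # -> 1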
|
StarcoderdataPython
|
1685898
|
"""
https://en.wikipedia.org/wiki/Binary_search_algorithm
"""
def binary_search_recursion(array, key, left: int = 0, right: int = None) -> int:
"""
Binary search algorithm using recursion.
:param array: the sorted array to be searched.
:param key: the key value to be searched.
:param left: the left index of sub array.
:param right: the right index of sub array.
:return: index of key value if found, otherwise -1.
>>> array = list(range(10))
>>> for index, item in enumerate(array):
... assert index == binary_search_recursion(array, item)
>>> binary_search_recursion(array, 10) == -1
True
>>> binary_search_recursion(array, -1) == -1
True
"""
if right is None:
right = len(array) - 1
if left > right:
return -1
mid = (left + right) >> 1
if key == array[mid]:
return mid
elif key > array[mid]:
return binary_search_recursion(array, key, mid + 1, right)
else:
return binary_search_recursion(array, key, left, mid - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
|
StarcoderdataPython
|
1730605
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.8.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
#
# <p align="center">
# <img width="100%" src="../../multimedia/mindstorms_51515_logo.png">
# </p>
#
# # `hub_image_animation`
# Small demo of how to display an image and an animation using the hub LEDs.
#
# # Required robot
# * Hub
#
# <img src="../multimedia/hub.jpg" width="50%" align="center">
#
# # Source code
# You can find the code in the accompanying [`.py` file](https://github.com/arturomoncadatorres/lego-mindstorms/blob/main/examples/programs/hub_image_animation.py). To get it running, simply copy and paste it in a new Mindstorms project.
#
# # Imports
# %%
from mindstorms import MSHub, Motor, MotorPair, ColorSensor, DistanceSensor, App
from mindstorms.control import wait_for_seconds, wait_until, Timer
from mindstorms.operator import greater_than, greater_than_or_equal_to, less_than, less_than_or_equal_to, equal_to, not_equal_to
import math
# %%
import hub
# %%
print("-"*15 + " Execution started " + "-"*15 + "\n")
# %% [markdown]
# # Using `hub`
# Notice we won't be using the standard `MSHub`, but rather the "raw" `hub`.
# It is a little lower level, but it allows us making more things - like turning on the hub's pixels.
# Fore more information, see [Maarten Pennings brilliant explanation and unofficial documentation about it](https://github.com/maarten-pennings/Lego-Mindstorms/blob/main/ms4/faq.md#why-are-there-so-many-ways-to-do--in-python).
# %%
# Turn the central light off
hub.led(0, 0, 0)
# Alternatively, use
# hub.status_light.on('black')
# %% [markdown]
# # How to display an image
# Displaying an image is quite simple. We just need to define which pixels we will turn on and at what intensity.
# The pixel definition is done in a string in the shape
#
# `00000:00000:00000:00000:00000`
#
# where each number corresponds to a pixel. Each pixel can have a value between `0` (off) to `9` (on at full intensity).
# Each group of numbers (from left to right) correspond to a row of the hub (from top to bottom).
# Notice the groups (i.e., rows) are separated by a colon `:`.
#
# Therefore, if we want to turn on the central pixel of the hub at full intensity, we can do the following:
# %%
print("Displaying example image...")
img_example = hub.Image('00000:00000:00900:00000:00000')
hub.display.show(img_example)
wait_for_seconds(5)
print("DONE!")
# %% [markdown]
# # How to display an animation
# After displaying an image, displaying an animation is quite straightforward, since an animation is
# basically displaying a succession of images.
#
# In this example, we will display a very simple animation: a dot moving from top to bottom (with a tail).
# However, the basic principle can be translated to more complicated animations.
#
# I am sure there are plenty of ways to display an animation, but I found a simple way to do this is the following.
#
# First, we will define the frame of the animation in a list.
# %%
print("Defining animation frames...")
frames = ['00000:00000:00000:00000:00000',
'00900:00000:00000:00000:00000',
'00700:00900:00000:00000:00000',
'00500:00700:00900:00000:00000',
'00000:00500:00700:00900:00000',
'00000:00000:00500:00700:00900',
'00000:00000:00000:00500:00700',
'00000:00000:00000:00000:00500',
'00000:00000:00000:00000:00000']
n_frames = len(frames)
print("DONE!")
# %% [markdown]
# Then, we need to define the length of a pause between frames.
# The larger the pause, the slower the animation will be.
# %%
print("Defining delay between frames...")
t_pause = 0.05 # In seconds
print("DONE!")
# %% [markdown]
# Lastly, we display the frames (images) consecutively.
# This can be done very easily in a `for` loop.
# %%
print("Displaying animation...")
for ii in range(0, n_frames):
img = hub.Image(frames[ii])
hub.display.show(img)
wait_for_seconds(t_pause)
print("DONE!")
# %% [markdown]
# That's it!
# %%
print("-"*15 + " Execution ended " + "-"*15 + "\n")
|
StarcoderdataPython
|
1554
|
<gh_stars>0
import emoji
import sentiment_analysis.src.report.cons_report as cons
import sentiment_analysis.src.constants as global_cons
from utils.data_connection.api_data_manager import APISourcesFetcher
from utils.utilities import read_json_file, CUSTOM_YEAR_WEEK_AGG, extract_dimension, extract_question
from sentiment_analysis.src.word_cloud import words_clouds
from sentiment_analysis.src.clients_language_sentiments_entity import ClientsLanguageSentiment
from nested_lookup import nested_lookup
class InterFaceReport:
def __init__(self, topics: dict, surveys: dict, company_id: str, weeks: list,
g_client: ClientsLanguageSentiment,
api_source_manager: APISourcesFetcher):
self.topics = topics
self.surveys = surveys
self.company_id = company_id
self.weeks = weeks
self.g_client = g_client
self.api_source_manager = api_source_manager
self.thresholds = ()
self.table_surveys_replies = []
self.table_topics = []
self.table_topic_comment = []
self.counter_text_sr = None
self.counter_text_topics = None
self.info_file = read_json_file("en_US.json")
self.image_base64_sr = None
self.image_base64_topics = None
def sort_by_dimension_sentiment_table(self) -> None:
"""
Sort by dimension and by sentiment
:return:
"""
temp_table = []
for dimension in cons.dimensions:
temp = [d for d in self.table_surveys_replies if d['dimension'] == dimension]
temp = sorted(temp, key=lambda k: k['sentiment'], reverse=True)
temp_table.extend(temp)
self.table_surveys_replies = temp_table
def insert_to_list_surveys_replies(self, features: list, company_week: int) -> None:
"""
Create array with the dictionary for interface
:param features: list of features to extract
:param company_week: company week of the company
:return:
"""
for item_analyze in features:
question = extract_question(self.info_file, dimension=item_analyze[0], week=company_week)
dimension = extract_dimension(self.info_file, dimension=item_analyze[0])
comment = item_analyze[1]
sentiment = item_analyze[2]
temp = {}
temp.update(dimension=dimension)
temp.update(question=question)
temp.update(comment=emoji.emojize(comment, use_aliases=True))
temp.update(sentiment=sentiment)
self.table_surveys_replies.append(temp)
self.sort_by_dimension_sentiment_table()
def insert_to_list_topics(self, features: list) -> None:
"""
Create array with the dictionary for interface - referenced to topic headlines
:param features: list of features to extract
:return:
"""
for item_analyze in features:
topic_id = item_analyze[0]
comment = item_analyze[1]
sentiment = item_analyze[2]
temp = {}
temp.update(id=topic_id)
temp.update(comment=emoji.emojize(comment, use_aliases=True))
temp.update(sentiment=sentiment)
self.table_topics.append(temp)
self.table_topics = sorted(self.table_topics, key=lambda k: k['sentiment'], reverse=True)
def insert_to_list_topic_comments(self, features: list) -> None:
"""
Create array with the dictionary for interface - referenced to topic comments
:param features: list of features to extract
:return:
"""
for item_analyze in features:
topic_id_comment_id = item_analyze[0]
comment = item_analyze[1]
sentiment = item_analyze[2]
temp = {}
temp.update(id=topic_id_comment_id)
temp.update(comment=emoji.emojize(comment, use_aliases=True))
temp.update(sentiment=sentiment)
self.table_topic_comment.append(temp)
self.table_topic_comment = sorted(self.table_topic_comment, key=lambda k: k['sentiment'], reverse=True)
def word_cloud(self):
"""
Create wordcloud of the main words
:return:
"""
self.image_base64_sr = words_clouds(self.counter_text_sr, cons.path_image_sr_wc)
self.image_base64_topics = words_clouds(self.counter_text_topics, cons.path_image_topics_wc)
@staticmethod
def __count_filter_keys(entities: list) -> object:
"""
Count and filter keys
:param entities: list of entities text
:return:
"""
entities = ClientsLanguageSentiment.count_entities(entities=entities)
entities = ClientsLanguageSentiment.filter_black_list(entities=entities)
return entities
def __process_sr(self) -> None:
"""
Process the surveys replies
:return:
"""
for company_id, periods in self.surveys.items():
for period in self.weeks:
period_parts = period.split(CUSTOM_YEAR_WEEK_AGG)
translations_week = self.api_source_manager.get_company_week_from_period(week=period_parts[0],
year=period_parts[1],
company_id=self.company_id)
sr_dimension = nested_lookup(global_cons.SR_DIMENSION, periods)
sr_content = nested_lookup(global_cons.SR_CONTENT, periods)
sr_sentiment = nested_lookup(global_cons.SENTIMENT, periods)
sr_entities = nested_lookup(global_cons.SR_ENTITIES, periods)
sr_comment_score = list(zip(sr_dimension, sr_content, sr_sentiment))
self.insert_to_list_surveys_replies(sr_comment_score, company_week=translations_week)
self.counter_text_sr = self.__count_filter_keys(entities=sr_entities)
def __process_topics(self) -> None:
"""
Process the topics
:return:
"""
for company_id, topics in self.topics.items():
# heading
topic_headings = nested_lookup(global_cons.TOPIC_CONTENT, topics)
topic_headings_sentiments = nested_lookup(global_cons.TOPIC_SENTIMENT, topics)
topic_ids = list(topics.keys())
topic_w_sentiments = list(zip(topic_ids, topic_headings, topic_headings_sentiments))
self.insert_to_list_topics(topic_w_sentiments)
# comments
for topic_id, topic in topics.items():
topic_comments = nested_lookup(global_cons.TOPIC_COMMENT, topic)
topic_comments_scores = nested_lookup(global_cons.TOPIC_COMMENT_SENTIMENT, topic)
topic_list_ids = [topic_id] * len(topic_comments)
topic_w_scores = list(zip(topic_list_ids, topic_comments, topic_comments_scores))
self.insert_to_list_topic_comments(topic_w_scores)
entities = nested_lookup(global_cons.TOPIC_ENTITIES, topics)
self.counter_text_topics = ClientsLanguageSentiment.count_entities(entities)
def process_interface(self) -> None:
"""
Take the info needed to write into report_pdf
:return:
"""
self.__process_sr()
self.__process_topics()
|
StarcoderdataPython
|
17786
|
<reponame>vishalbelsare/jina
from typing import Optional, Dict, Any
from fastapi import APIRouter
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
from ....excepts import PartialDaemon400Exception
from ....models import PodModel
from ....models.partial import PartialStoreItem
from ....stores import partial_store as store
router = APIRouter(prefix='/pod', tags=['pod'])
@router.get(
path='',
summary='Get status of a running Pod',
response_model=PartialStoreItem,
)
async def _status():
"""
.. #noqa: DAR101
.. #noqa: DAR201"""
return store.item
@router.post(
path='',
summary='Create a Pod',
description='Create a Pod and add it to the store',
status_code=201,
response_model=PartialStoreItem,
)
async def _create(pod: 'PodModel'):
"""
.. #noqa: DAR101
.. #noqa: DAR201"""
try:
args = ArgNamespace.kwargs2namespace(pod.dict(), set_pod_parser())
return store.add(args)
except Exception as ex:
raise PartialDaemon400Exception from ex
@router.put(
path='/rolling_update',
summary='Run a rolling_update operation on the Pod object',
response_model=PartialStoreItem,
)
async def rolling_update(uses_with: Optional[Dict[str, Any]] = None):
"""
.. #noqa: DAR101
.. #noqa: DAR201
"""
try:
return await store.rolling_update(uses_with=uses_with)
except ValueError as ex:
raise PartialDaemon400Exception from ex
@router.put(
path='/scale',
summary='Run a scale operation on the Pod object',
response_model=PartialStoreItem,
)
async def scale(replicas: int):
"""
.. #noqa: DAR101
.. #noqa: DAR201
"""
try:
return await store.scale(replicas=replicas)
except ValueError as ex:
raise PartialDaemon400Exception from ex
@router.delete(
path='',
summary='Terminate the running Pod',
description='Terminate a running Pod and release its resources',
)
async def _delete():
"""
.. #noqa: DAR101
.. #noqa: DAR201"""
try:
store.delete()
except Exception as ex:
raise PartialDaemon400Exception from ex
@router.on_event('shutdown')
def _shutdown():
"""
.. #noqa: DAR101
.. #noqa: DAR201"""
store.delete()
|
StarcoderdataPython
|
70328
|
<gh_stars>0
import torch
class RegressionEvaluationMetrics(object):
@staticmethod
def r_squared(output, target):
x = output
y = target
vx = x - torch.mean(x)
vy = y - torch.mean(y)
cost = torch.sum(vx * vy) / (torch.sqrt(torch.sum(vx ** 2)) * torch.sqrt(torch.sum(vy ** 2)))
return cost ** 2
@staticmethod
def TM(output, target):
x = output
y = target
euclidean_distance = torch.dist(x, y)
x = x.view(1, -1)
y = y.view(-1, 1)
return torch.mm(x, y) / (euclidean_distance ** 2 + torch.mm(x, y))
@staticmethod
def ssim(output, target):
x = output
y = target
var_x, mean_x = torch.var_mean(x)
var_y, mean_y = torch.var_mean(y)
cov_x_y = torch.sum(torch.mul(x - mean_x, y - mean_y)) / x.view(-1, 1).shape[0]
c1 = (0.01 * 1.8) ** 2
c2 = (0.03 * 1.8) ** 2
return (2 * mean_x * mean_y + c1) * (2 * cov_x_y + c2) / (
(mean_x ** 2 + mean_y ** 2 + c1) * (var_x + var_y + c2))
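
# Hedged usage sketch (not part of the original file): evaluate the three metrics on a toy
# prediction/target pair. The tensor shape and noise level are illustrative only.
if __name__ == "__main__":
    torch.manual_seed(0)
    target = torch.rand(16)
    output = target + 0.05 * torch.randn(16)  # a slightly noisy "prediction"
    print(RegressionEvaluationMetrics.r_squared(output, target))
    print(RegressionEvaluationMetrics.TM(output, target))
    print(RegressionEvaluationMetrics.ssim(output, target))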
|
StarcoderdataPython
|
3223901
|
<filename>Cloud-Net-A-semantic-segmentation-CNN-for-cloud-detection/Cloud-Net/augmentation.py<gh_stars>0
import numpy as np
import skimage
import skimage.transform as trans
"""
Some lines borrowed from: https://www.kaggle.com/sashakorekov/end-to-end-resnet50-with-tta-lb-0-93
"""
def rotate_clk_img_and_msk(img, msk):
angle = np.random.choice((4, 6, 8, 10, 12, 14, 16, 18, 20))
img_o = trans.rotate(img, angle, resize=False, preserve_range=True, mode='symmetric')
msk_o = trans.rotate(msk, angle, resize=False, preserve_range=True, mode='symmetric')
return img_o, msk_o
def rotate_cclk_img_and_msk(img, msk):
angle = np.random.choice((-20, -18, -16, -14, -12, -10, -8, -6, -4))
img_o = trans.rotate(img, angle, resize=False, preserve_range=True, mode='symmetric')
msk_o = trans.rotate(msk, angle, resize=False, preserve_range=True, mode='symmetric')
return img_o, msk_o
def flipping_img_and_msk(img, msk):
img_o = np.flip(img, axis=1)
msk_o = np.flip(msk, axis=1)
return img_o, msk_o
def zoom_img_and_msk(img, msk):
zoom_factor = np.random.choice((1.2, 1.5, 1.8, 2, 2.2, 2.5)) # currently doesn't have zoom out!
h, w = img.shape[:2]
# width and height of the zoomed image
zh = int(np.round(zoom_factor * h))
zw = int(np.round(zoom_factor * w))
img = trans.resize(img, (zh, zw), preserve_range=True, mode='symmetric')
msk = trans.resize(msk, (zh, zw), preserve_range=True, mode='symmetric')
region = np.random.choice((0, 1, 2, 3, 4))
# zooming out
if zoom_factor <= 1:
outimg = img
outmsk = msk
# zooming in
else:
# bounding box of the clipped region within the input array
if region == 0:
outimg = img[0:h, 0:w]
outmsk = msk[0:h, 0:w]
if region == 1:
outimg = img[0:h, zw - w:zw]
outmsk = msk[0:h, zw - w:zw]
if region == 2:
outimg = img[zh - h:zh, 0:w]
outmsk = msk[zh - h:zh, 0:w]
if region == 3:
outimg = img[zh - h:zh, zw - w:zw]
outmsk = msk[zh - h:zh, zw - w:zw]
if region == 4:
marh = h // 2
marw = w // 2
outimg = img[(zh // 2 - marh):(zh // 2 + marh), (zw // 2 - marw):(zw // 2 + marw)]
outmsk = msk[(zh // 2 - marh):(zh // 2 + marh), (zw // 2 - marw):(zw // 2 + marw)]
# to make sure the output is in the same size of the input
img_o = trans.resize(outimg, (h, w), preserve_range=True, mode='symmetric')
msk_o = trans.resize(outmsk, (h, w), preserve_range=True, mode='symmetric')
return img_o, msk_o
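
# Hedged usage sketch (not part of the original file): pick one augmentation at random and
# apply it to a dummy image/mask pair, mirroring how these helpers are typically used
# inside a training generator. The patch size and band count are illustrative.
if __name__ == "__main__":
    img = np.random.rand(192, 192, 4)                                # e.g. a 4-band patch
    msk = np.random.randint(0, 2, size=(192, 192, 1)).astype(float)  # binary cloud mask
    augment = np.random.choice([rotate_clk_img_and_msk, rotate_cclk_img_and_msk,
                                flipping_img_and_msk, zoom_img_and_msk])
    img_aug, msk_aug = augment(img, msk)
    print(img_aug.shape, msk_aug.shape)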
|
StarcoderdataPython
|
3239076
|
from PyQt5 import QtWidgets, QtCore
class LineEdit(QtWidgets.QLineEdit):
activated = QtCore.pyqtSignal(str)
def __init__(self, parent=None):
super().__init__(parent=parent)
go_icon = self.style().standardIcon(QtWidgets.QStyle.SP_DialogOkButton)
activated_action = self.addAction(go_icon, QtWidgets.QLineEdit.TrailingPosition)
activated_action.triggered.connect(lambda: self.activated.emit(self.text()))
return
|
StarcoderdataPython
|
4834459
|
<reponame>monisjaved/Data-Processing-With-Hadoop<gh_stars>1-10
#!/usr/bin/env python
import sys
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
# split the line into words
words = line.split()
for i in xrange(len(words)-1):
for j in range(i+1,len(words)):
print "%s|%s\t%s" % (words[i],words[j], 1)
|
StarcoderdataPython
|
3285554
|
<filename>rsHRF/rsHRF_GUI/gui_windows/inputWindow.py
import os
from tkinter import Toplevel, Checkbutton, IntVar, Button, filedialog, NORMAL, DISABLED, OptionMenu, StringVar, Label
class InputWindow():
def __init__(self):
# input window
window = Toplevel()
window.title("Input Window")
# get screen width and height
screen_width = window.winfo_screenwidth()
screen_height = window.winfo_screenheight()
# placing the toplevel
window.geometry("350x220+%d+%d" % ((280/1900)*screen_width, ((((1040.0-220)/1000)*screen_height)-390)-280))
# variables which shall get sent to the front end
self.input_file = ()
self.mask_file = ()
self.file_type = ()
self.output_dir = ()
# other class vairables
# 1 corresponds to BIDS input
self.inputFormatVar = IntVar()
self.inputFormatVar.set(0)
# 1 corresponds to mask file being present in the BIDS directory
self.maskFormatVar = IntVar()
self.maskFormatVar.set(0)
# selecting the estimation rule
self.estimationOption = StringVar()
self.estimationOption.set('canon2dd')
def getInputFile():
if self.inputFormatVar.get(): # input takes a directory
self.input_file = filedialog.askdirectory(initialdir=os.getcwd())
maskFormat.configure(state=NORMAL)
else:
self.input_file = filedialog.askopenfilename(initialdir=os.getcwd(), title="Input File Path", filetypes=(("nifti files", "*.nii"), ("nifti files", "*.nii.gz"), ("gifti files", "*.gii"), ("gifti files", "*.gii.gz")))
maskFormat.configure(state=DISABLED)
try:
self.file_type = os.path.splitext(self.input_file)[1]
except:
self.file_type = ()
try:
inputFileLabel.configure(text=self.input_file.split('/')[-1])
except:
inputFileLabel.configure(text="")
def maskFormatButtonState():
if self.maskFormatVar.get():
maskFormatButton.configure(state=DISABLED)
else:
maskFormatButton.configure(state=NORMAL)
def inputFormatButtonState():
if self.inputFormatVar.get():
maskFormat.configure(state=NORMAL)
else:
maskFormat.configure(state=DISABLED)
maskFormatButtonState()
def getMaskFile():
self.mask_file = filedialog.askopenfilename(initialdir=os.getcwd(), title="Input File Path", filetypes=(("nifti files", "*.nii"), ("nifti files", "*.nii.gz"), ("gifti files", "*.gii"), ("gifti files", "*.gii.gz")))
try:
maskFileLabel.configure(text=self.mask_file.split("/")[-1])
except:
maskFileLabel.configure(text="")
def getOutputDir():
self.output_dir = filedialog.askdirectory(initialdir=os.getcwd())
try:
outputPathLabel.configure(text="Output path: " + self.output_dir.split("/")[-1])
except:
outputPathLabel.configure(text="")
# defining widgets
inputFormat = Checkbutton(window, text="BIDS Format", variable=self.inputFormatVar, command=inputFormatButtonState)
maskFormat = Checkbutton(window, text="Mask File in BIDS", variable=self.maskFormatVar, state=DISABLED, command=maskFormatButtonState)
inputFormatButton = Button (window, text="Select Input", command=getInputFile, height=1, width=20)
maskFormatButton = Button (window, text="Select Mask File", state=NORMAL, command=getMaskFile, height=1, width=20)
outputPathButton = Button (window, text="Select Output Directory", command=getOutputDir, height=1, width=20)
estimationLabel = Label (window, text="Estimation Rule: ")
inputFileLabel = Label (window, text="")
maskFileLabel = Label (window, text="")
outputPathLabel = Label (window, text="")
estimationDropDown = OptionMenu (window, self.estimationOption, "canon2dd", "sFIR", "FIR", "gamma", "fourier", "fourier w/ hanning")
# placing widgets
inputFormat.grid (row=0,column=0,padx=(5,5),pady=(5,5))
inputFormatButton.grid (row=0,column=1,padx=(5,5),pady=(5,5))
inputFileLabel.grid (row=1,column=0,padx=(5,5),pady=(5,5),columnspan=2)
maskFormat.grid (row=2,column=0,padx=(5,5),pady=(5,5))
maskFormatButton.grid (row=2,column=1,padx=(5,5),pady=(5,5))
maskFileLabel.grid (row=3,column=0,padx=(5,5),pady=(5,5),columnspan=2)
outputPathButton.grid (row=4,column=1,padx=(5,5),pady=(5,5))
outputPathLabel.grid (row=4,column=0,padx=(5,5),pady=(5,5))
estimationLabel.grid (row=5,column=0,padx=(5,5),pady=(5,5))
estimationDropDown.grid(row=5,column=1,padx=(5,5),pady=(5,5))
def getInput(self):
if self.inputFormatVar.get() * self.maskFormatVar.get():
mode = "bids"
elif self.inputFormatVar.get():
mode = "bids w/ atlas"
else:
mode = "file"
return (self.input_file, self.mask_file, self.file_type, mode, self.estimationOption.get(), self.output_dir)
|
StarcoderdataPython
|
1609896
|
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# pylint: disable=maybe-no-member, invalid-name, too-many-lines
"""Test request import and updates."""
import unittest
import datetime
import collections
import ddt
import freezegun
from mock import mock
from ggrc import db
from ggrc import utils
from ggrc.models import all_models
from ggrc.access_control.role import get_custom_roles_for
from ggrc.converters import errors
from integration.ggrc import generator
from integration.ggrc import TestCase
from integration.ggrc.models import factories
# pylint: disable=too-many-public-methods
@ddt.ddt
class TestAssessmentImport(TestCase):
"""Basic Assessment import tests with.
This test suite should test new Assessment imports, exports, and updates.
The main focus of these tests is checking error messages for invalid state
transitions.
"""
def setUp(self):
"""Set up for Assessment test cases."""
super(TestAssessmentImport, self).setUp()
self.client.get("/login")
def test_import_assessments_with_templates(self):
"""Test importing of assessments with templates."""
with factories.single_commit():
audit = factories.AuditFactory()
assessment_template = factories.AssessmentTemplateFactory(audit=audit)
assessment_template_slug = assessment_template.slug
factories.CustomAttributeDefinitionFactory(
title='test_attr1',
definition_type='assessment_template',
definition_id=assessment_template.id,
)
factories.CustomAttributeDefinitionFactory(
title='test_attr2',
attribute_type="Date",
definition_type='assessment_template',
definition_id=assessment_template.id,
)
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Template", assessment_template_slug),
("Audit", audit.slug),
("Assignees", "<EMAIL>"),
("Creators", "<EMAIL>"),
("Title", "Assessment 1"),
("test_attr1", "abc"),
("test_attr2", "7/15/2015"),
]))
assessment = all_models.Assessment.query.filter(
all_models.Assessment.title == "Assessment 1").first()
values = set(v.attribute_value for v in assessment.custom_attribute_values)
self.assertIn("abc", values)
self.assertIn("2015-07-15", values)
def test_import_assessment_with_evidence_file(self):
"""Test import evidence file should add warning"""
evidence_url = "test_gdrive_url"
audit = factories.AuditFactory()
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Title*", "Assessment1"),
("Assignees", "<EMAIL>"),
("Creators", "<EMAIL>"),
("Evidence File", evidence_url),
]))
evidences = all_models.Evidence.query.filter(
all_models.Evidence.kind == all_models.Evidence.FILE).all()
self.assertEquals(len(evidences), 0)
expected_warning = (u"Line 3: 'Evidence File' can't be changed via "
u"import. Please go on Assessment page and make "
u"changes manually. The column will be skipped")
expected_messages = {
"Assessment": {
"row_warnings": {expected_warning},
}
}
self._check_csv_response(response, expected_messages)
def test_import_assessment_with_evidence_file_existing(self):
"""If file already mapped to evidence not show warning to user"""
evidence_url = "test_gdrive_url"
with factories.single_commit():
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory()
assessment_slug = assessment.slug
factories.RelationshipFactory(source=audit,
destination=assessment)
evidence = factories.EvidenceFileFactory(link=evidence_url)
factories.RelationshipFactory(source=assessment,
destination=evidence)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment_slug),
("Evidence File", evidence_url),
]))
self.assertEquals([], response[0]['row_warnings'])
def test_import_assessment_with_template(self):
"""If assessment exist and import with template and lca"""
with factories.single_commit():
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory()
template = factories.AssessmentTemplateFactory()
factories.RelationshipFactory(source=audit,
destination=assessment)
factories.CustomAttributeDefinitionFactory(
title="Test LCA",
definition_type="assessment",
attribute_type="Text",
definition_id=assessment.id
)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment.slug),
("Template", template.slug),
]))
self.assertEquals([], response[0]["row_warnings"])
self.assertEquals([], response[0]["row_errors"])
def test_import_assessment_with_evidence_url_existing(self):
"""If url already mapped to assessment ignore it"""
evidence_url = "test_gdrive_url"
with factories.single_commit():
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory()
assessment_slug = assessment.slug
factories.RelationshipFactory(source=audit,
destination=assessment)
evidence = factories.EvidenceUrlFactory(link=evidence_url)
factories.RelationshipFactory(source=assessment,
destination=evidence)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment_slug),
("Evidence Url", evidence_url),
]))
evidences = all_models.Evidence.query.filter_by(link=evidence_url).all()
self.assertEquals(1, len(evidences))
self.assertEquals([], response[0]['row_warnings'])
def test_import_assessment_with_evidence_file_multiple(self):
"""Show warning if at least one of Evidence Files not mapped"""
evidence_url = "test_gdrive_url"
with factories.single_commit():
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory()
assessment_slug = assessment.slug
factories.RelationshipFactory(source=audit,
destination=assessment)
evidence1 = factories.EvidenceFileFactory(link=evidence_url)
factories.RelationshipFactory(source=assessment,
destination=evidence1)
evidence2 = factories.EvidenceFileFactory(link="test_gdrive_url_2")
factories.RelationshipFactory(source=assessment,
destination=evidence2)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment_slug),
("Evidence File", evidence_url + "\n another_gdrive_url"),
]))
expected_warning = (u"Line 3: 'Evidence File' can't be changed via import."
u" Please go on Assessment page and make changes"
u" manually. The column will be skipped")
self.assertEquals([expected_warning], response[0]['row_warnings'])
def test_import_assessment_with_evidence_file_blank_multiple(self):
"""No warnings in Evidence Files"""
evidence_file = "test_gdrive_url \n \n another_gdrive_url"
with factories.single_commit():
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory()
assessment_slug = assessment.slug
factories.RelationshipFactory(source=audit, destination=assessment)
evidence1 = factories.EvidenceFileFactory(link="test_gdrive_url")
factories.RelationshipFactory(source=assessment,
destination=evidence1)
evidence2 = factories.EvidenceFileFactory(link="another_gdrive_url")
factories.RelationshipFactory(source=assessment,
destination=evidence2)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment_slug),
("Evidence File", evidence_file),
]))
self.assertEquals([], response[0]['row_warnings'])
@mock.patch('ggrc.gdrive.file_actions.process_gdrive_file')
@mock.patch('ggrc.gdrive.file_actions.get_gdrive_file_link')
def test_assessment_bulk_mode(self, get_gdrive_link, process_gdrive_mock):
"""Test import assessment evidence file in bulk_import mode"""
evidence_file = "mock_id"
process_gdrive_mock.return_value = {
"id": "mock_id",
"webViewLink": "mock_link",
"name": "mock_name",
}
get_gdrive_link.return_value = "mock_id"
with factories.single_commit():
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory()
assessment_slug = assessment.slug
factories.RelationshipFactory(source=audit, destination=assessment)
with mock.patch("ggrc.converters.base.ImportConverter.is_bulk_import",
return_value=True):
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment_slug),
("Evidence File", evidence_file),
]))
self.assertEqual(process_gdrive_mock.call_count, 1)
self.assertEqual(get_gdrive_link.call_count, 1)
self._check_csv_response(response, {})
assessment = all_models.Assessment.query.filter_by(
slug=assessment_slug
).first()
self.assertEqual(len(assessment.evidences_file), 1)
@mock.patch('ggrc.gdrive.file_actions.process_gdrive_file')
@mock.patch('ggrc.gdrive.file_actions.get_gdrive_file_link')
def test_bulk_mode_update_evidence(self, get_gdrive_link,
process_gdrive_mock):
"""Test update assessment evidence file in bulk_import mode"""
evidence_file = "mock_id2"
process_gdrive_mock.return_value = {
"id": "mock_id2",
"webViewLink": "mock_link2",
"name": "mock_name2",
}
get_gdrive_link.return_value = "mock_id"
with factories.single_commit():
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory()
assessment_slug = assessment.slug
factories.RelationshipFactory(source=audit, destination=assessment)
evidence = factories.EvidenceFileFactory(link="mock_link",
gdrive_id="mock_id")
factories.RelationshipFactory(source=assessment, destination=evidence)
with mock.patch("ggrc.converters.base.ImportConverter.is_bulk_import",
return_value=True):
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment_slug),
("Evidence File", evidence_file),
]))
self.assertEqual(process_gdrive_mock.call_count, 1)
self.assertEqual(get_gdrive_link.call_count, 1)
self._check_csv_response(response, {})
assessment = all_models.Assessment.query.filter_by(
slug=assessment_slug
).first()
self.assertEqual(len(assessment.evidences_file), 1)
for evidence in assessment.evidences_file:
self.assertEqual(evidence.gdrive_id, "mock_id2")
self.assertEqual(evidence.link, "mock_link2")
self.assertEqual(evidence.title, "mock_name2")
def _test_assessment_users(self, asmt, users):
""" Test that all users have correct roles on specified Assessment"""
verification_errors = ""
ac_roles = {
acr_name: acr_id
for acr_id, acr_name in get_custom_roles_for(asmt.type).items()
}
for user_name, expected_types in users.items():
for role in expected_types:
try:
user = all_models.Person.query.filter_by(name=user_name).first()
acl_len = all_models.AccessControlPerson.query.join(
all_models.AccessControlList
).filter(
all_models.AccessControlList.ac_role_id == ac_roles[role],
all_models.AccessControlPerson.person_id == user.id,
all_models.AccessControlList.object_id == asmt.id,
all_models.AccessControlList.object_type == asmt.type,
).count()
self.assertEqual(
acl_len, 1,
"User {} is not mapped to {}".format(user.email, asmt.slug)
)
except AssertionError as error:
verification_errors += "\n\nChecks for Users-Assessment mapping "\
"failed for user '{}' with:\n{}".format(user_name, str(error))
self.assertEqual(verification_errors, "", verification_errors)
def _test_assigned_user(self, assessment, user_id, role):
"""Check if user has role on assessment"""
acls = all_models.AccessControlPerson.query.join(
all_models.AccessControlList
).filter(
all_models.AccessControlPerson.person_id == user_id,
all_models.AccessControlList.object_id == assessment.id,
all_models.AccessControlList.object_type == assessment.type,
)
self.assertEqual(
[user_id] if user_id else [],
[i.person_id for i in acls if i.ac_list.ac_role.name == role]
)
def test_assessment_full_no_warnings(self):
""" Test full assessment import with no warnings
CSV sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=704933240&vpid=A7
"""
with factories.single_commit():
for i in range(1, 4):
factories.PersonFactory(
name="user {}".format(i),
email="<EMAIL>".<EMAIL>(i)
)
audit = factories.AuditFactory()
assessment_data = [
collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Assignees*", "<EMAIL>\n<EMAIL>"),
("Creators", "<EMAIL>"),
("Title", "Assessment 1"),
("Evidence Url", "http://i.imgur.com/Lppr347.jpg")
]),
collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Assignees*", "<EMAIL>\n<EMAIL>"),
("Creators", "<EMAIL>\<EMAIL>"),
("Title", "Assessment 2"),
("Status", "In Progress")
]),
]
self.import_data(*assessment_data)
# Test first Assessment
asmt_1 = all_models.Assessment.query.filter_by(
title="Assessment 1").first()
users = {
"user 1": {"Assignees"},
"user 2": {"Assignees", "Creators"},
"user 3": {}
}
self._test_assessment_users(asmt_1, users)
self.assertEqual(asmt_1.status, all_models.Assessment.PROGRESS_STATE)
# Test second Assessment
asmt_2 = all_models.Assessment.query.filter_by(
title="Assessment 2").first()
users = {
"user 1": {"Assignees"},
"user 2": {"Creators"},
"user 3": {"Assignees", "Creators"},
}
self._test_assessment_users(asmt_2, users)
self.assertEqual(asmt_2.status, all_models.Assessment.PROGRESS_STATE)
audit = [obj for obj in asmt_1.related_objects() if obj.type == "Audit"][0]
self.assertEqual(audit.context, asmt_1.context)
evidence = all_models.Evidence.query.filter_by(
link="http://i.imgur.com/Lppr347.jpg").first()
self.assertEqual(audit.context, evidence.context)
@ddt.data(
("In PROGRESS",
{
"State": "Verified",
"Verifiers": "<EMAIL>",
},
all_models.Assessment.PROGRESS_STATE
),
("not started",
{
"State": "In Review",
"Verifiers": "<EMAIL>",
"Title": "Modified Assessment",
"Notes": "Edited Notes"
},
all_models.Assessment.PROGRESS_STATE
)
)
@ddt.unpack
def test_assessment_import_states(self, start_status,
modified_data, expected_status):
""" Test Assessment state imports
These tests are an intermediate part for zucchini release and will be
updated in the next release.
CSV sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=299569476
"""
emails = ["<EMAIL>", "<EMAIL>"]
with factories.single_commit():
audit = factories.AuditFactory()
audit_slug = audit.slug
for email in emails:
factories.PersonFactory(email=email)
assessment_data = collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit_slug),
("Assignees*", "<EMAIL>"),
("Creators", "<EMAIL>"),
("Title", "New Assessment"),
("State", start_status)
])
self.import_data(assessment_data)
assessment = all_models.Assessment.query.filter_by(
title="New Assessment").first()
assessment_slug = assessment.slug
modified_asmt_data = collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment_slug),
])
modified_asmt_data.update(modified_data)
response = self.import_data(modified_asmt_data)
self._check_csv_response(response, {
"Assessment": {
"row_warnings": {
errors.STATE_WILL_BE_IGNORED.format(line=3),
}
}
})
assessment = all_models.Assessment.query.first()
self.assertEqual(assessment.status, expected_status)
@unittest.skip("Test randomly fails because backend does not return errors")
def test_error_ca_import_states(self):
"""Test changing state of Assessment with unfilled mandatory CA"""
with factories.single_commit():
audit = factories.AuditFactory()
asmnt = factories.AssessmentFactory(audit=audit)
factories.CustomAttributeDefinitionFactory(
title="def1",
definition_type="assessment",
definition_id=asmnt.id,
attribute_type="Text",
mandatory=True,
)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", asmnt.slug),
("Audit", audit.slug),
("Assignees", "<EMAIL>"),
("Creators", "<EMAIL>"),
("Title", asmnt.title),
("State", "Completed"),
]))
expected_errors = {
"Assessment": {
"row_errors": {
errors.VALIDATION_ERROR.format(
line=3,
column_name="State",
message="CA-introduced completion preconditions are not "
"satisfied. Check preconditions_failed of items "
"of self.custom_attribute_values"
)
}
}
}
self._check_csv_response(response, expected_errors)
asmnt = all_models.Assessment.query.filter(
all_models.Assessment.slug == asmnt.slug
).first()
self.assertEqual(asmnt.status, "Not Started")
@ddt.data(
(
[
collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Assignees", "<EMAIL>"),
("Creators", "<EMAIL>"),
("Title", "Some title"),
("Unexpected Column", "Some value")
])
],
{
"Assessment": {
"block_warnings": {
errors.UNKNOWN_COLUMN.format(
line=2,
column_name="unexpected column"
)
}
}
}
),
(
[
collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Assignees", "<EMAIL>"),
("Creators", "<EMAIL>"),
("Title", "Some title"),
("map:project", "")
])
],
{
"Assessment": {
"block_warnings": {
errors.UNSUPPORTED_MAPPING.format(
line=2,
obj_a="Assessment",
obj_b="Project",
column_name="map:project"
)
}
}
}
),
(
[
collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", "not existing"),
("Assignees", "<EMAIL>"),
("Creators", "<EMAIL>"),
("Title", "Some title"),
])
],
{
"Assessment": {
"row_errors": {
errors.MISSING_VALUE_ERROR.format(
line=3,
column_name="Audit"
)
},
"row_warnings": {
errors.UNKNOWN_OBJECT.format(
line=3,
object_type="Audit",
slug="not existing"
)
}
}
}
),
(
[
collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Assignees", "<EMAIL>"),
("Creators", "<EMAIL>"),
("Title", "Some title"),
("State", "Open")
])
],
{
"Assessment": {
"row_warnings": {
errors.WRONG_VALUE_DEFAULT.format(
line=3,
column_name="State",
value="open",
)
}
}
}
),
(
[
collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Title", "New Assessment"),
("Creators", "<EMAIL>"),
("Assignees", "<EMAIL>"),
("Verifiers", "<EMAIL>"),
("Finished Date", "7/3/2015"),
("Verified Date", "5/14/2016"),
]),
collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Verified Date", "5/15/2016"),
])
],
{
"Assessment": {
"row_warnings": {
errors.UNMODIFIABLE_COLUMN.format(
line=3,
column_name="Verified Date"
)
}
}
}
),
)
@ddt.unpack
def test_assessment_warnings_errors(self, assessment_data, expected_errors):
""" Test full assessment import with warnings and errors
CSV sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=889865936
"""
if len(assessment_data) == 1:
if "Audit*" not in assessment_data[0]:
audit = factories.AuditFactory()
assessment_data[0]["Audit*"] = audit.slug
response = self.import_data(*assessment_data)
else:
audit = factories.AuditFactory()
assessment_data[0]["Audit*"] = audit.slug
self.import_data(assessment_data[0])
assessment = all_models.Assessment.query.filter_by(
title="New Assessment").first()
assessment_data[1]["Code*"] = assessment.slug
assessment_data[1]["Audit*"] = audit.slug
response = self.import_data(assessment_data[1])
self._check_csv_response(response, expected_errors)
def test_blank_optional_field(self):
"""Test warnings while import assessment with blank IssueTracker fields"""
audit = factories.AuditFactory()
resp = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Title*", "ass1"),
("Creators*", "<EMAIL>"),
("Assignees*", "<EMAIL>"),
("Component ID", ""),
("Hotlist ID", ""),
("Priority", ""),
("Severity", ""),
("Issue Type", ""),
("Ticket Title", ""),
("Ticket Tracker Integration", ""),
]))
self._check_csv_response(resp, {})
def test_mapping_control_through_snapshot(self):
"Test for add mapping control on assessment"
with factories.single_commit():
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory(audit=audit)
factories.RelationshipFactory(source=audit, destination=assessment)
control = factories.ControlFactory()
revision = all_models.Revision.query.filter(
all_models.Revision.resource_id == control.id,
all_models.Revision.resource_type == control.__class__.__name__
).order_by(
all_models.Revision.id.desc()
).first()
factories.SnapshotFactory(
parent=audit,
child_id=control.id,
child_type=control.__class__.__name__,
revision_id=revision.id
)
db.session.commit()
self.assertFalse(db.session.query(
all_models.Relationship.get_related_query(
assessment, all_models.Snapshot()
).exists()).first()[0])
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment.slug),
("map:Control versions", control.slug),
]))
self.assertTrue(db.session.query(
all_models.Relationship.get_related_query(
assessment, all_models.Snapshot()
).exists()).first()[0])
@ddt.data(
("yes", True),
("no", True),
("invalid_data", False),
)
@ddt.unpack
def test_import_view_only_field(self, value, is_valid):
"Test import view only fields"
with factories.single_commit():
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory(audit=audit)
factories.RelationshipFactory(source=audit, destination=assessment)
resp = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment.slug),
("archived", value),
]))
row_warnings = []
if not is_valid:
row_warnings.append(u"Line 3: Field 'Archived' contains invalid data. "
u"The value will be ignored.")
self.assertEqual(
[{
u'ignored': 0,
u'updated': 1,
u'block_errors': [],
u'name': u'Assessment',
u'created': 0,
u'deleted': 0,
u'deprecated': 0,
u'row_warnings': row_warnings,
u'rows': 1,
u'block_warnings': [],
u'row_errors': [],
}],
resp)
@ddt.data((False, "no", 0, 1, []),
(True, "yes", 1, 0, [u'Line 3: Importing archived instance is '
u'prohibited. The line will be ignored.']))
@ddt.unpack
# pylint: disable=too-many-arguments
def test_import_archived_assessment(self, is_archived, value, ignored,
updated, row_errors):
"""Test archived assessment import procedure"""
with factories.single_commit():
audit = factories.AuditFactory(archived=is_archived)
assessment = factories.AssessmentFactory(audit=audit)
factories.RelationshipFactory(source=audit, destination=assessment)
resp = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment.slug),
("archived", value),
("description", "archived assessment description")
]))
self.assertEqual([{
u'ignored': ignored,
u'updated': updated,
u'block_errors': [],
u'name': u'Assessment',
u'created': 0,
u'deleted': 0,
u'deprecated': 0,
u'row_warnings': [],
u'rows': 1,
u'block_warnings': [],
u'row_errors': row_errors
}], resp)
def test_create_new_assessment_with_mapped_control(self):
"Test for creation assessment with mapped controls"
with factories.single_commit():
audit = factories.AuditFactory()
control = factories.ControlFactory()
revision = all_models.Revision.query.filter(
all_models.Revision.resource_id == control.id,
all_models.Revision.resource_type == control.__class__.__name__
).order_by(
all_models.Revision.id.desc()
).first()
factories.SnapshotFactory(
parent=audit,
child_id=control.id,
child_type=control.__class__.__name__,
revision_id=revision.id
)
db.session.commit()
self.assertFalse(db.session.query(
all_models.Relationship.get_related_query(
all_models.Assessment(), all_models.Snapshot()
).exists()).first()[0])
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Assignees*", all_models.Person.query.all()[0].email),
("Creators", all_models.Person.query.all()[0].email),
("Title", "Strange title"),
("map:Control versions", control.slug),
]))
self._check_csv_response(response, {})
assessment = all_models.Assessment.query.filter(
all_models.Assessment.title == "Strange title"
).first()
self.assertTrue(db.session.query(all_models.Relationship.get_related_query(
assessment, all_models.Snapshot()).exists()).first()[0]
)
def test_create_import_assignee(self):
"Test for creation assessment with mapped assignees"
name = "test_name"
email = "<EMAIL>"
with factories.single_commit():
audit = factories.AuditFactory()
assignee_id = factories.PersonFactory(name=name, email=email).id
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Assignees*", email),
("Creators", all_models.Person.query.all()[0].email),
("Title", "Strange title"),
]))
assessment = all_models.Assessment.query.filter(
all_models.Assessment.title == "Strange title"
).first()
self._test_assigned_user(assessment, assignee_id, "Assignees")
def test_create_import_creators(self):
"Test for creation assessment with mapped creator"
name = "test_name"
email = "<EMAIL>"
with factories.single_commit():
audit = factories.AuditFactory()
creator_id = factories.PersonFactory(name=name, email=email).id
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Audit*", audit.slug),
("Assignees*", all_models.Person.query.all()[0].email),
("Creators", email),
("Title", "Strange title"),
]))
assessment = all_models.Assessment.query.filter(
all_models.Assessment.title == "Strange title"
).first()
self._test_assigned_user(assessment, creator_id, "Creators")
def test_update_import_creators(self):
"Test for creation assessment with mapped creator"
slug = "TestAssessment"
name = "test_name"
email = "<EMAIL>"
with factories.single_commit():
assessment = factories.AssessmentFactory(slug=slug)
creator_id = factories.PersonFactory(name=name, email=email).id
self._test_assigned_user(assessment, None, "Creators")
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", slug),
("Creators", email),
]))
assessment = all_models.Assessment.query.filter(
all_models.Assessment.slug == slug
).first()
self._test_assigned_user(assessment, creator_id, "Creators")
def test_update_import_assignee(self):
"Test for creation assessment with mapped creator"
slug = "TestAssessment"
name = "test_name"
email = "<EMAIL>"
with factories.single_commit():
assessment = factories.AssessmentFactory(slug=slug)
assignee_id = factories.PersonFactory(name=name, email=email).id
self._test_assigned_user(assessment, None, "Assignees")
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", slug),
("Assignees", email),
]))
assessment = all_models.Assessment.query.filter(
all_models.Assessment.slug == slug
).first()
self._test_assigned_user(assessment, assignee_id, "Assignees")
def test_update_import_verifiers(self):
"""Test import does not delete verifiers if empty value imported"""
slug = "TestAssessment"
assessment = factories.AssessmentFactory(slug=slug)
name = "test_name"
email = "<EMAIL>"
verifier = factories.PersonFactory(name=name, email=email)
verifier_id = verifier.id
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", slug),
("Verifiers", email),
]))
assessment = all_models.Assessment.query.filter(
all_models.Assessment.slug == slug
).first()
self._test_assigned_user(assessment, verifier_id, "Verifiers")
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", slug),
("Verifiers", ""),
]))
assessment = all_models.Assessment.query.filter(
all_models.Assessment.slug == slug
).first()
self._test_assigned_user(assessment, verifier_id, "Verifiers")
@ddt.data(
(
"Created Date",
lambda: datetime.date.today() - datetime.timedelta(7),
),
)
@ddt.unpack
def test_update_non_changeable_field(self, field, value_creator):
"""Test importing Assessment's "Created Date" field"""
slug = "TestAssessment"
with factories.single_commit():
value = value_creator()
factories.AssessmentFactory(
slug=slug,
modified_by=factories.PersonFactory(email="<EMAIL>"),
)
data = [{
"object_name": "Assessment",
"fields": "all",
"filters": {
"expression": {
"left": "code",
"op": {"name": "="},
"right": slug
},
}
}]
before_update = self.export_parsed_csv(data)["Assessment"][0][field]
with freezegun.freeze_time("2017-9-10"):
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", slug),
(field, value)
]))
self.assertEqual(before_update,
self.export_parsed_csv(data)["Assessment"][0][field])
@ddt.data(
("Last Updated By", "<EMAIL>"),
)
@ddt.unpack
def test_exportable_only_updated_by(self, field, value):
"""Test exportable only "Last Updated By" field"""
slug = "TestAssessment"
with factories.single_commit():
factories.AssessmentFactory(
slug=slug,
modified_by=factories.PersonFactory(email="<EMAIL>"),
)
data = [{
"object_name": "Assessment",
"fields": "all",
"filters": {
"expression": {
"left": "code",
"op": {"name": "="},
"right": slug
},
}
}]
before_update = self.export_parsed_csv(data)["Assessment"][0][field]
self.assertEqual(before_update, "<EMAIL>")
self.import_data(collections.OrderedDict(
[
("object_type", "Assessment"),
("Code*", slug),
(field, value)
]
))
after_update = self.export_parsed_csv(data)["Assessment"][0][field]
self.assertEqual(after_update, "<EMAIL>")
def test_import_last_deprecated_date(self):
"""Last Deprecated Date on assessment should be non editable."""
with factories.single_commit():
with freezegun.freeze_time("2017-01-01"):
assessment = factories.AssessmentFactory(status="Deprecated")
resp = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("code", assessment.slug),
("Last Deprecated Date", "02/02/2017"),
]))
result = all_models.Assessment.query.get(assessment.id)
self.assertEqual(1, len(resp))
self.assertEqual(1, resp[0]["updated"])
self.assertEqual(result.end_date, datetime.date(2017, 1, 1))
@ddt.data(*all_models.Assessment.VALID_STATES)
def test_import_set_up_deprecated(self, start_state):
"""Update assessment from {0} to Deprecated."""
with factories.single_commit():
assessment = factories.AssessmentFactory(status=start_state)
resp = self.import_data(
collections.OrderedDict([
("object_type", "Assessment"),
("code", assessment.slug),
("State", all_models.Assessment.DEPRECATED),
]))
self.assertEqual(1, len(resp))
self.assertEqual(1, resp[0]["updated"])
self.assertEqual(
all_models.Assessment.query.get(assessment.id).status,
all_models.Assessment.DEPRECATED)
def test_asmnt_cads_update_completed(self):
"""Test update of assessment without cads."""
with factories.single_commit():
audit = factories.AuditFactory()
asmnt = factories.AssessmentFactory(audit=audit)
factories.CustomAttributeDefinitionFactory(
title="CAD",
definition_type="assessment",
definition_id=asmnt.id,
attribute_type="Text",
mandatory=True,
)
data = collections.OrderedDict([
("object_type", "Assessment"),
("Code*", asmnt.slug),
("Audit", audit.slug),
("Title", "Test title"),
("State", "Completed"),
("CAD", "Some value"),
])
response = self.import_data(data)
self._check_csv_response(response, {})
def test_import_complete_missing_answers_warnings(self):
"""Test complete assessment with missing mandatory CAD comments."""
with factories.single_commit():
audit = factories.AuditFactory()
asmnt = factories.AssessmentFactory(audit=audit)
factories.CustomAttributeDefinitionFactory(
title="CAD",
definition_type="assessment",
definition_id=asmnt.id,
attribute_type="Dropdown",
multi_choice_options="no,yes",
multi_choice_mandatory="0,1"
)
data = collections.OrderedDict([
("object_type", "Assessment"),
("Code*", asmnt.slug),
("Audit", audit.slug),
("Title", "Test title"),
("State", "Completed"),
("CAD", "yes"),
])
expected_response = {
"Assessment": {
"row_warnings": {
errors.NO_REQUIRED_ANSWERS_WARNING.format(line=3),
}
}
}
response = self.import_data(data)
self._check_csv_response(response, expected_response)
def test_import_asmnt_rev_query_count(self):
"""Test only one revisions insert query should occur while importing."""
with factories.single_commit():
audit = factories.AuditFactory()
asmnt = factories.AssessmentFactory(audit=audit)
cad_names = ("CAD1", "CAD2", "CAD3")
for name in cad_names:
factories.CustomAttributeDefinitionFactory(
title=name,
definition_type="assessment",
definition_id=asmnt.id,
attribute_type="Text",
mandatory=True,
)
data = collections.OrderedDict([
("object_type", "Assessment"),
("Code*", asmnt.slug),
("Audit", audit.slug),
("Title", "Test title"),
("State", "Completed"),
("CAD1", "Some value 1"),
("CAD2", "Some value 2"),
("CAD3", "Some value 3"),
])
with utils.QueryCounter() as counter:
response = self.import_data(data)
self._check_csv_response(response, {})
rev_insert_queries = [query for query in counter.queries
if 'INSERT INTO revisions' in query]
self.assertEqual(len(rev_insert_queries), 1)
def test_asmt_verified_date_update_from_none(self):
"""Test that we able to set Verified Date if it is empty"""
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory(audit=audit)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code", assessment.slug),
("Verifiers", "<EMAIL>"),
("Verified Date", "01/22/2019"),
]))
self._check_csv_response(response, {})
self.assertEqual(
all_models.Assessment.query.get(assessment.id).verified_date,
datetime.datetime(2019, 1, 22))
def test_asmt_complete_verified(self):
"""Test assessment moved to Complete and Verified state"""
with factories.single_commit():
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory(audit=audit)
slug = assessment.slug
user = all_models.Person.query.first()
assessment.add_person_with_role_name(user, "Verifiers")
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code", slug),
("State", "Completed"),
("Verified Date", "01/22/2019"),
]))
self._check_csv_response(response, {})
assmt = all_models.Assessment.query.one()
self.assertTrue(assmt.verified)
self.assertEqual(assmt.status, "Completed")
def test_asmt_verified_date_readonly(self):
"""Test that Verified Date is readonly"""
audit = factories.AuditFactory()
date = datetime.datetime(2019, 5, 22)
assessment = \
factories.AssessmentFactory(audit=audit,
verified_date=date)
expected_warnings = {
'Assessment': {
'row_warnings': {
errors.UNMODIFIABLE_COLUMN.format(
line=3,
column_name="Verified Date"
)}}}
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code", assessment.slug),
("Verifiers", "<EMAIL>"),
("Verified Date", "01/21/2019"),
]))
self._check_csv_response(response, expected_warnings)
self.assertEqual(
all_models.Assessment.query.get(assessment.id).verified_date,
date)
@ddt.data("<EMAIL>", "--")
def test_asmt_state_after_updating_verifiers(self, new_verifier):
"""Test that after updating Verifiers assessment became In Progress"""
audit = factories.AuditFactory()
assessment = \
factories.AssessmentFactory(audit=audit,
status=all_models.Assessment.DONE_STATE,
)
person = factories.PersonFactory(email="<EMAIL>")
factories.AccessControlPersonFactory(
ac_list=assessment.acr_name_acl_map["Verifiers"],
person=person,
)
self.assertEqual(
all_models.Assessment.query.get(assessment.id).status,
all_models.Assessment.DONE_STATE)
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code", assessment.slug),
("Verifiers", new_verifier),
]))
self.assertEqual(
all_models.Assessment.query.get(assessment.id).status,
all_models.Assessment.PROGRESS_STATE)
def test_import_asmnt_state_with_verifiers(self):
"""Assessment with Verifiers should update Status to In Review if we are
importing Completed state"""
with factories.single_commit():
assessment = factories.AssessmentFactory()
person = factories.PersonFactory()
factories.AccessControlPersonFactory(
ac_list=assessment.acr_name_acl_map["Verifiers"],
person=person,
)
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code", assessment.slug),
("State", all_models.Assessment.FINAL_STATE),
]))
self.assertEqual(
all_models.Assessment.query.get(assessment.id).status,
all_models.Assessment.DONE_STATE)
def test_import_asmnt_state_with_verifiers_and_date(self):
"""Assessment with Verifiers should update Status to Completed if we are
importing Completed state with filled Verified Date"""
with factories.single_commit():
assessment = factories.AssessmentFactory()
person = factories.PersonFactory()
factories.AccessControlPersonFactory(
ac_list=assessment.acr_name_acl_map["Verifiers"],
person=person,
)
self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code", assessment.slug),
("Verified Date", "11/20/2019"),
("State", all_models.Assessment.FINAL_STATE)
]))
asmnt = all_models.Assessment.query.get(assessment.id)
self.assertEqual(asmnt.status, all_models.Assessment.FINAL_STATE)
self.assertEqual(asmnt.verified_date, datetime.datetime(2019, 11, 20))
def test_assmt_with_multiselect_gca(self):
"""Import of assessment with multiselect CAD shouldn't add assmt.CAV"""
assess_slug = "TestAssessment"
with factories.single_commit():
# create 2 GCA's
cad_text = factories.CustomAttributeDefinitionFactory(
title="text_GCA",
definition_type="assessment",
attribute_type="Text",
)
factories.CustomAttributeDefinitionFactory(
title="multiselect_GCA",
definition_type="assessment",
attribute_type="Multiselect",
multi_choice_options="1,2,3"
)
# create assessment with 1 CAV
assessment = factories.AssessmentFactory(
slug=assess_slug,
)
factories.CustomAttributeValueFactory(
custom_attribute=cad_text,
attributable=assessment,
attribute_value="text",
)
assessment_id = assessment.id
# update given assessment with empty GCA multiselect type
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code", assess_slug),
("multiselect_GCA", ""),
]))
self._check_csv_response(response, {})
assessment = all_models.Assessment.query.get(assessment_id)
self.assertEqual(1, len(assessment.custom_attribute_values))
self.assertEqual(
"text", assessment.custom_attribute_values[0].attribute_value
)
def test_asmt_missing_mandatory_gca(self):
""""Import asmt with mandatory empty multiselect CAD"""
asmt_slug = "TestAssessment"
with factories.single_commit():
factories.CustomAttributeDefinitionFactory(
title="multiselect_GCA",
definition_type="assessment",
attribute_type="Multiselect",
multi_choice_options="1,2,3",
mandatory=True,
)
factories.AssessmentFactory(slug=asmt_slug)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code", asmt_slug),
("multiselect_GCA", ""),
]))
expected_response = {
"Assessment": {
"row_errors": {
errors.MISSING_VALUE_ERROR.format(
column_name="multiselect_GCA",
line=3
),
},
},
}
self._check_csv_response(response, expected_response)
def test_asmt_with_multiselect_gca_diff_text(self):
""""Import asmt with mandatory diff case text multiselect CAD"""
asmt_slug = "TestAssessment"
with factories.single_commit():
factories.CustomAttributeDefinitionFactory(
title="multiselect_GCA",
definition_type="assessment",
attribute_type="Multiselect",
multi_choice_options="Option 1,Option 2,Option 3",
)
# create assessment with 1 CAV
asmt = factories.AssessmentFactory(slug=asmt_slug)
asmt_id = asmt.id
# update given assessment with empty GCA multiselect type
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code", asmt_slug),
("multiselect_GCA", "option 1"),
]))
self._check_csv_response(response, {})
asmt = all_models.Assessment.query.get(asmt_id)
self.assertEqual(1, len(asmt.custom_attribute_values))
self.assertEqual(
"Option 1", asmt.custom_attribute_values[0].attribute_value
)
@ddt.data(
(
factories.IssueFactory,
"map:issue",
"<EMAIL>",
),
(
factories.ObjectiveFactory,
"map:objective versions",
"<EMAIL>",
),
)
@ddt.unpack
def test_asmt_state_updating_verifiers_with_map_fields(
self, map_factory, map_column_name, new_verifier
):
"""Test assessment In Progress after updating Verifiers and map fields"""
with factories.single_commit():
audit = factories.AuditFactory()
map_object = map_factory()
snapshot = factories.SnapshotFactory(
parent=audit,
child_id=map_object.id,
child_type=map_object.__class__.__name__,
revision=factories.RevisionFactory()
)
assessment = factories.AssessmentFactory(
audit=audit,
status=all_models.Assessment.DONE_STATE,
)
person = factories.PersonFactory(email="<EMAIL>")
factories.RelationshipFactory(source=assessment, destination=snapshot)
factories.AccessControlPersonFactory(
ac_list=assessment.acr_name_acl_map["Verifiers"],
person=person,
)
self.assertEqual(
all_models.Assessment.query.get(assessment.id).status,
all_models.Assessment.DONE_STATE)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code", assessment.slug),
("Verifiers", new_verifier),
(map_column_name, map_object.slug),
]))
expected_response = {
"Assessment": {
"row_warnings": {
errors.STATE_WILL_BE_IGNORED.format(line=3),
}
}
}
self._check_csv_response(response, expected_response)
assessment = all_models.Assessment.query.get(assessment.id)
verifiers = [v.email for v in assessment.verifiers]
self.assertEqual(assessment.status, all_models.Assessment.PROGRESS_STATE)
self.assertEqual(verifiers or [""], [new_verifier])
@ddt.data(
(
factories.IssueFactory,
"map:issue",
),
(
factories.ObjectiveFactory,
"map:objective versions",
),
)
@ddt.unpack
def test_asmt_state_updating_empty_verifiers_with_map_fields(
self, map_factory, map_column_name
):
"""Test assessment In Progress after updating empty Verifiers,map fields"""
with factories.single_commit():
audit = factories.AuditFactory()
map_object = map_factory()
snapshot = factories.SnapshotFactory(
parent=audit,
child_id=map_object.id,
child_type=map_object.__class__.__name__,
revision=factories.RevisionFactory()
)
assessment = factories.AssessmentFactory(
audit=audit,
status=all_models.Assessment.DONE_STATE,
)
person = factories.PersonFactory(email="<EMAIL>")
factories.RelationshipFactory(source=assessment, destination=snapshot)
factories.AccessControlPersonFactory(
ac_list=assessment.acr_name_acl_map["Verifiers"],
person=person,
)
self.assertEqual(
all_models.Assessment.query.get(assessment.id).status,
all_models.Assessment.DONE_STATE)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code", assessment.slug),
("Verifiers", "--"),
(map_column_name, map_object.slug),
]))
expected_response = {
"Assessment": {
"row_warnings": {
errors.STATE_WILL_BE_IGNORED.format(line=3),
}
}
}
self._check_csv_response(response, expected_response)
assessment = all_models.Assessment.query.get(assessment.id)
verifiers = [v.email for v in assessment.verifiers]
self.assertEqual(assessment.status, all_models.Assessment.PROGRESS_STATE)
self.assertEqual(verifiers or [""], [""])
@ddt.data(
(
("LCA1", "LCA2", "LCA3"),
("val1", "val2", "val3"),
("", "", ""),
{},
),
(
("LCA1", "LCA2", "LCA3"),
("val1", "val2", "val3"),
("", "val", ""),
{
"Assessment": {
"row_warnings": {
"Line 4: Object does not contain attribute 'LCA2'. "
"The value will be ignored.",
},
},
},
),
(
("LCA1", "LCA2", "LCA3", "LCA4"),
("val1", "val2", "val3", ""),
("", "", "", ""),
{
"Assessment": {
"block_warnings": {
"Line 2: Attribute 'lca4' does not exist. "
"Column will be ignored.",
},
},
},
),
)
@ddt.unpack
def test_import_assessments_with_lca(self, attrs, asmt1_vals, asmt2_vals,
exp_errors):
"""Test import file with two or more assessments, only one have lca"""
with factories.single_commit():
audit = factories.AuditFactory()
assessment1 = factories.AssessmentFactory(audit=audit)
assessment2 = factories.AssessmentFactory(audit=audit)
factories.CustomAttributeDefinitionFactory(
title=attrs[0],
definition_type='assessment',
definition_id=assessment1.id,
)
factories.CustomAttributeDefinitionFactory(
title=attrs[1],
definition_type='assessment',
definition_id=assessment1.id,
)
factories.CustomAttributeDefinitionFactory(
title=attrs[2],
definition_type='assessment',
definition_id=assessment1.id,
)
assessment_data1 = collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment1.slug),
("Audit", audit.slug),
("Title", assessment1.title),
])
assessment_data2 = collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment2.slug),
("Audit", audit.slug),
("Title", assessment2.title),
])
assessment_data1.update(
dict([(attrs[i], asmt1_vals[i]) for i in range(len(attrs))]))
assessment_data2.update(
dict([(attrs[i], asmt2_vals[i]) for i in range(len(attrs))]))
response = self.import_data(assessment_data1, assessment_data2)
self._check_csv_response(response, exp_errors)
@ddt.data((True, "yes", "Completed", "Completed"),
(False, "no", "Completed", "Completed"),
(True, "no", "Completed", "In Progress"),
(False, "yes", "Completed", "In Progress"))
@ddt.unpack
def test_assessment_status_import_checkbox_lca(self, init_value,
new_value, init_status,
expected_status):
"""Assessment should not change Status if we do not update Checkbox LCA"""
with factories.single_commit():
assessment = factories.AssessmentFactory(status=init_status)
assessment_id = assessment.id
cad = factories.CustomAttributeDefinitionFactory(
title="Checkbox LCA",
attribute_type="Checkbox",
definition_type='assessment',
definition_id=assessment_id,
)
factories.CustomAttributeValueFactory(
custom_attribute=cad,
attributable=assessment,
attribute_value=init_value,
)
assessment_data = collections.OrderedDict([
("object_type", "Assessment"),
("Code*", assessment.slug),
("Title", assessment.title),
("Checkbox LCA", new_value)
])
response = self.import_data(assessment_data)
self._check_csv_response(response, {})
assessment = self.refresh_object(assessment, assessment_id)
self.assertEqual(expected_status, assessment.status)
@ddt.ddt
class TestAssessmentExport(TestCase):
"""Test Assessment object export."""
def setUp(self):
""" Set up for Assessment test cases """
super(TestAssessmentExport, self).setUp()
self.client.get("/login")
self.headers = generator.ObjectGenerator.get_header()
def test_simple_export(self):
""" Test full assessment export with no warnings"""
assessment = factories.AssessmentFactory(title="Assessment 1")
assessment_slug = assessment.slug
data = [{
"object_name": "Assessment",
"filters": {
"expression": {}
},
"fields": "all",
}]
response = self.export_csv(data)
self.assertIn(',{},'.format(assessment_slug), response.data)
# pylint: disable=invalid-name
def assertColumnExportedValue(self, value, instance, column):
""" Assertion checks is value equal to exported instance column value."""
data = [{
"object_name": instance.__class__.__name__,
"fields": "all",
"filters": {
"expression": {
"text": str(instance.id),
"op": {
"name": "text_search",
}
},
},
}]
instance_dict = self.export_parsed_csv(data)[instance.type][0]
self.assertEqual(value, instance_dict[column])
# pylint: disable=invalid-name
def test_export_assessments_without_map_control(self):
"""Test export assessment without related control instance"""
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory(audit=audit)
factories.RelationshipFactory(source=audit, destination=assessment)
control = factories.ControlFactory()
revision = all_models.Revision.query.filter(
all_models.Revision.resource_id == control.id,
all_models.Revision.resource_type == control.__class__.__name__
).order_by(
all_models.Revision.id.desc()
).first()
factories.SnapshotFactory(
parent=audit,
child_id=control.id,
child_type=control.__class__.__name__,
revision_id=revision.id
)
db.session.commit()
self.assertColumnExportedValue("", assessment,
"map:control versions")
@ddt.data(True, False)
def test_export_map_control(self, with_map):
"""Test export assessment with and without related control instance"""
with factories.single_commit():
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory(audit=audit)
factories.RelationshipFactory(source=audit, destination=assessment)
control = factories.ControlFactory()
revision = all_models.Revision.query.filter(
all_models.Revision.resource_id == control.id,
all_models.Revision.resource_type == control.__class__.__name__
).order_by(
all_models.Revision.id.desc()
).first()
with factories.single_commit():
snapshot = factories.SnapshotFactory(
parent=audit,
child_id=control.id,
child_type=control.__class__.__name__,
revision_id=revision.id
)
if with_map:
factories.RelationshipFactory(source=snapshot, destination=assessment)
if with_map:
val = control.slug
else:
val = ""
self.assertColumnExportedValue(val, assessment, "map:control versions")
# pylint: disable=invalid-name
def test_export_with_map_control_mirror_relation(self):
"""Test export assessment with related control instance
relation assessment -> snapshot
"""
with factories.single_commit():
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory(audit=audit)
factories.RelationshipFactory(source=audit, destination=assessment)
control = factories.ControlFactory()
revision = all_models.Revision.query.filter(
all_models.Revision.resource_id == control.id,
all_models.Revision.resource_type == control.__class__.__name__
).order_by(
all_models.Revision.id.desc()
).first()
snapshot = factories.SnapshotFactory(
parent=audit,
child_id=control.id,
child_type=control.__class__.__name__,
revision_id=revision.id
)
db.session.commit()
factories.RelationshipFactory(destination=snapshot, source=assessment)
self.assertColumnExportedValue(control.slug, assessment,
"map:control versions")
# pylint: disable=invalid-name
def test_export_assessments_with_filters_and_conflicting_ca_names(self):
"""Test exporting assessments with conflicting custom attribute names."""
# also create an object level custom attribute with a name that clashes
# with a name of a "regular" attribute
assessment = factories.AssessmentFactory(title="No template Assessment 1")
assessment_slug = assessment.slug
assessment = all_models.Assessment.query.filter(
all_models.Assessment.slug == assessment_slug).first()
cad = all_models.CustomAttributeDefinition(
attribute_type=u"Text",
title=u"ca title",
definition_type=u"assessment",
definition_id=assessment.id
)
db.session.add(cad)
db.session.commit()
data = [{
"object_name": "Assessment",
"fields": ["slug", "title", "description", "status"],
"filters": {
"expression": {
"left": {
"left": "code",
"op": {"name": "~"},
"right": "ASSESSMENT"
},
"op": {"name": "AND"},
"right": {
"left": "title",
"op": {"name": "~"},
"right": "no template Assessment"
}
},
"keys": ["code", "title", "status"],
"order_by": {
"keys": [],
"order": "",
"compare": None
}
}
}]
response = self.export_csv(data)
self.assertIn(u"No template Assessment 1", response.data)
@ddt.data(
("Last Updated By", "<EMAIL>"),
("modified_by", "<EMAIL>"),
)
@ddt.unpack
def test_export_by_modified_by(self, field, email):
"""Test for creation assessment with mapped creator"""
slug = "TestAssessment"
with factories.single_commit():
factories.AssessmentFactory(
slug=slug,
modified_by=factories.PersonFactory(email=email),
)
data = [{
"object_name": "Assessment",
"fields": "all",
"filters": {
"expression": {
"left": field,
"op": {"name": "="},
"right": email
},
}
}]
resp = self.export_parsed_csv(data)["Assessment"]
self.assertEqual(1, len(resp))
self.assertEqual(slug, resp[0]["Code*"])
@ddt.data(
("", "In Review", "", True),
("", "In Review", "<EMAIL>", False),
("", "Rework Needed", "", True),
("12/27/2018", "Completed", "", True),
("", "Completed", "", False),
("12/27/2018", "Completed", "<EMAIL>", False),
("", "In Progress", "", False),
)
@ddt.unpack
def test_asmt_status_and_verifier(self, date, status, verifiers, warning):
"""Test assessment status validation requiring verifier"""
audit = factories.AuditFactory()
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Title", "Test title"),
("Audit", audit.slug),
("Creators", "<EMAIL>"),
("Assignees", "<EMAIL>"),
("Verifiers", verifiers),
("Verified Date", date),
("State", status),
]))
expected_warnings = {
'Assessment': {
'row_warnings': {
errors.NO_VERIFIER_WARNING.format(
line=3,
status=status
)}}}
if warning:
self._check_csv_response(response, expected_warnings)
else:
self._check_csv_response(response, {})
def test_import_assessment_without_verifiers(self):
"""Test import with change status and remove verifiers"""
asmt = factories.AssessmentFactory()
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", asmt.slug),
("State", "In Review"),
("Verifiers", "--")
]))
expected_errors = {
"Assessment": {
"row_warnings": {
errors.NO_VERIFIER_WARNING.format(line=3, status='In Review'),
}
}
}
self._check_csv_response(response, expected_errors)
@ddt.data(1, 2)
def test_import_assessment_with_verifiers(self, verifiers_num):
"""Test import with change status and remove verifiers"""
with factories.single_commit():
asmt = factories.AssessmentFactory(status="In Review")
for _ in range(verifiers_num):
user = factories.PersonFactory()
asmt.add_person_with_role_name(user, "Verifiers")
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", asmt.slug),
("State", "In Review"),
("Verifiers", "--")
]))
expected_errors = {
"Assessment": {
"row_warnings": {
errors.STATE_WILL_BE_IGNORED.format(line=3),
}
}
}
self._check_csv_response(response, expected_errors)
def test_import_assessment_with_deleted_template(self):
"""Test import with deleted template from exported assessment"""
with factories.single_commit():
audit = factories.AuditFactory()
assessment_template = factories.AssessmentTemplateFactory(audit=audit)
assessment = factories.AssessmentFactory(audit=audit)
factories.CustomAttributeDefinitionFactory(
title='test_attr',
definition_type='assessment_template',
definition_id=assessment_template.id,
)
factories.CustomAttributeDefinitionFactory(
title='test_attr',
definition_type='assessment',
definition_id=assessment.id,
)
db.session.delete(assessment_template)
response = self.import_data(collections.OrderedDict([
("object_type", "Assessment"),
("Code*", ""),
("Template", ""),
("Audit", audit.slug),
("Assignees", "<EMAIL>"),
("Creators", "<EMAIL>"),
("Title", "test-{id}Title".format(id=assessment.id)),
("test_attr", "asdfafs"),
]), dry_run=True)
self._check_csv_response(response, {})
|
StarcoderdataPython
|
198372
|
<gh_stars>100-1000
def up(config, database, semester, course):
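"""Restore (un-delete) every thread that has been merged into another
thread, i.e. rows whose merged_thread_id is set to something other than -1."""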
database.execute("UPDATE threads SET deleted = false WHERE merged_thread_id <> -1")
|
StarcoderdataPython
|
3286731
|
<filename>lib/ansible/modules/network/fadcos/fadcos_route_static.py
#!/usr/bin/python
#
# This file is part of Ansible
#
#
# update date: 2019/03/12
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
"""
EXAMPLES = """
"""
RETURN = """
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fadcos.fadcos import fadcos_argument_spec, is_vdom_enable, get_err_msg, is_user_in_vdom
import json
def add_route_static(module, connection):
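"""Create a static route from the module parameters via /api/router_static
(scoped to a VDOM when VDOMs are enabled)."""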
route_id = module.params['route_id']
desination = module.params['desination']
gateway = module.params['gateway']
distance = module.params['distance']
vdom = module.params['vdom']
payload = {'mkey': route_id,
'dest': desination,
'distance': distance,
'gw': gateway,
}
url = '/api/router_static'
if is_vdom_enable(connection):
url += '?vdom=' + vdom
code, response = connection.send_request(url, payload)
return code, response
def edit_route_static(module, payload, connection):
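"""Update the static route identified by route_id with the given payload
using a PUT request."""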
name = module.params['route_id']
url = '/api/router_static?mkey=' + name
if is_vdom_enable(connection):
vdom = module.params['vdom']
url += '&vdom=' + vdom
code, response = connection.send_request(url, payload, 'PUT')
return code, response
def get_route_static(module, connection):
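"""Fetch a single static route by route_id, or all static routes when no
route_id is given."""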
name = module.params['route_id']
payload = {}
url = '/api/router_static'
if name:
url += '?mkey=' + name
if is_vdom_enable(connection):
vdom = module.params['vdom']
if name:
url += '&vdom=' + vdom
else:
url += '?vdom=' + vdom
code, response = connection.send_request(url, payload, 'GET')
return code, response
def delete_route_static(module, connection):
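"""Delete the static route identified by route_id."""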
name = module.params['route_id']
payload = {}
url = '/api/router_static?mkey=' + name
if is_vdom_enable(connection):
vdom = module.params['vdom']
url += '&vdom=' + vdom
code, response = connection.send_request(url, payload, 'DELETE')
return code, response
def needs_update(module, data):
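"""Overlay changed module parameters onto the existing entry data and
report whether anything differs; returns (changed, data)."""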
res = False
if module.params['desination'] and module.params['desination'] != data['dest']:
data['dest'] = module.params['desination']
res = True
if module.params['gateway'] and module.params['gateway'] != data['gw']:
data['gw'] = module.params['gateway']
res = True
if module.params['distance'] and module.params['distance'] != data['distance']:
data['distance'] = module.params['distance']
res = True
return res, data
def param_check(module, connection):
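"""Validate the module parameters for the requested action; returns a
(passed, error_messages) tuple."""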
res = True
action = module.params['action']
err_msg = []
if (action == 'edit' or action == 'delete') and not module.params['route_id']:
err_msg.append('The route_id need to set.')
res = False
if module.params['route_id']:
try:
i = int(module.params['route_id'])
except ValueError:
err_msg.append('The route_id must be integer.')
res = False
if action == 'add' and not module.params['desination']:
err_msg.append('The desination need to set.')
res = False
if action == 'add' and not module.params['gateway']:
err_msg.append('The gateway must be set.')
res = False
if is_vdom_enable(connection) and not module.params['vdom']:
err_msg.append('The vdom is enable in system setting, vdom must be set.')
res = False
elif is_vdom_enable(connection) and module.params['vdom'] and not is_user_in_vdom(connection, module.params['vdom']):
err_msg.append('The user can not accsee the vdom ' + module.params['vdom'])
res = False
return res, err_msg
def main():
argument_spec = dict(
action=dict(type='str', required=True),
route_id=dict(type='str'),
desination=dict(type='str'),
gateway=dict(type='str'),
distance=dict(type='str'),
vdom=dict(type='str')
)
argument_spec.update(fadcos_argument_spec)
required_if = []
module = AnsibleModule(argument_spec=argument_spec, required_if=required_if)
connection = Connection(module._socket_path)
action = module.params['action']
result = {}
param_pass, param_err = param_check(module, connection)
if not param_pass:
result['err_msg'] = param_err
result['failed'] = True
elif action == 'add':
code, response = add_route_static(module, connection)
result['res'] = response
result['changed'] = True
elif action == 'get':
code, response = get_route_static(module, connection)
result['res'] = response
elif action == 'edit':
code, data = get_route_static(module, connection)
if 'payload' in data.keys() and data['payload'] and type(data['payload']) is not int:
res, new_data = needs_update(module, data['payload'])
else:
result['failed'] = True
res = False
result['err_msg'] = 'Entry not found'
if res:
code, response = edit_route_static(module, new_data, connection)
result['changed'] = True
result['res'] = response
elif action == 'delete':
code, data = get_route_static(module, connection)
if 'payload' in data.keys() and data['payload'] and type(data['payload']) is not int:
res, new_data = needs_update(module, data['payload'])
code, response = delete_route_static(module, connection)
result['res'] = response
result['changed'] = True
else:
result['failed'] = True
res = False
result['err_msg'] = 'Entry not found'
else:
result['err_msg'] = 'error action: ' + action
result['failed'] = True
if 'res' in result.keys() and type(result['res']) is dict\
and type(result['res']['payload']) is int and result['res']['payload'] < 0:
result['err_msg'] = get_err_msg(connection, result['res']['payload'])
result['changed'] = False
result['failed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
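# Illustrative only: a minimal playbook task sketch built from the argument_spec
# above. Host details and all values are assumptions, and "desination" is spelled
# exactly as the module defines it.
#
# - name: add a static route
#   fadcos_route_static:
#     action: add
#     route_id: "1"
#     desination: "10.10.10.0/24"
#     gateway: "192.168.1.254"
#     distance: "10"
#     vdom: "root"        # only required when VDOMs are enabled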
|
StarcoderdataPython
|
32609
|
import abc
from collections import OrderedDict
from .constants import RESULT_KEY_MAP
class ResultMessageBase(abc.ABC):
"""
Result message base class.
"""
@abc.abstractmethod
def get_content(self, custom_data=None):
"""
Get message content.
Args:
custom_data (dict): Any custom data.
Returns:
(dict): Message content.
"""
return {}
def get_options(self):
"""
Get message options.
Returns:
(dict): Message options.
"""
return {}
@staticmethod
def convert_result_to_readable(result):
"""
Convert result keys to convenient format.
Args:
result (OrderedDict): Raw result data.
Returns:
(OrderedDict): Converted result data.
"""
converted = OrderedDict()
for key, value in result.items():
if key in RESULT_KEY_MAP:
converted[RESULT_KEY_MAP[key]] = value
return converted
class FileResultMessageBase(ResultMessageBase):
"""
Build and sent result as document message.
"""
@abc.abstractmethod
def get_filename(self):
"""
Define filename.
Returns:
(str): Filename.
"""
return "output"
@abc.abstractmethod
def get_document(self, data):
"""
Build document to send.
Args:
data (dict): Data to build document.
Returns:
(file-like object): Document.
"""
return None
def get_content(self, custom_data=None):
content = {
"filename": self.get_filename(),
"document": self.get_document(custom_data or {}),
}
content.update(self.get_options())
return content
def send(self, bot, chat_id, custom_data=None):
"""
Send built message.
Args:
bot (instance): Bot.
chat_id (int): Chat ID.
custom_data (dict): Any custom data.
Returns: None.
"""
bot.send_document(
chat_id=chat_id,
**self.get_content(custom_data)
)
class TextResultMessageBase(ResultMessageBase):
"""
Build and sent result as text message.
"""
@abc.abstractmethod
def get_text(self, data):
"""
Build text to send.
Args:
data (dict): Data to build text.
Returns:
(str): Text.
"""
return ""
def get_content(self, custom_data=None):
content = {"text": self.get_text(custom_data or {})}
content.update(self.get_options())
return content
def send(self, bot, chat_id, custom_data=None):
"""
Send built message.
Args:
bot (instance): Bot.
chat_id (int): Chat ID.
custom_data (dict): Any custom data.
Returns: None.
"""
bot.send_message(
chat_id=chat_id,
**self.get_content(custom_data)
)
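# Illustrative sketch: a minimal concrete subclass, assuming the same bot
# interface used by send() above. The class name and formatting are made up.
class PlainTextResultMessage(TextResultMessageBase):
    """Render the converted result data as plain "key: value" lines."""

    def get_text(self, data):
        readable = self.convert_result_to_readable(OrderedDict(data))
        return "\n".join(
            "{}: {}".format(key, value) for key, value in readable.items()
        )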
|
StarcoderdataPython
|
1790865
|
#!/usr/bin/env python
import argparse
import random
import socket
import sys
import urlparse
import json
from wsgiref.simple_server import make_server
TIMEZONE = "US/Central"
def validate_parameters(query_dict, parameters):
"""
Check parameters in query_dict using the parameters specified
:param query_dict: a dictionary with key / value pairs to test
:param parameters: a dictionary with parameter name / type
specifying the type of parameters in the query_dict
:return: true or false depending on whether the parameters are valid
"""
for key, val in parameters.iteritems():
if key not in query_dict:
return False
if val == int:
try:
int(query_dict[key][0])
except ValueError:
return False
elif val == bool:
try:
bool(query_dict[key][0])
except ValueError:
return False
return True
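# Illustrative examples of the contract above (made-up values; note that
# urlparse.parse_qs wraps each value in a list):
#   validate_parameters({'userid': ['u1'], 'jobid': ['42']},
#                       {'userid': str, 'jobid': int})       # -> True
#   validate_parameters({'jobid': ['abc']}, {'jobid': int})   # -> False (not an int)
#   validate_parameters({}, {'userid': str})                  # -> False (missing key)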
def delete_job(environ):
"""
Remove a job from being processed
TODO: placeholder for now
:param environ: dictionary with environment variables (See PEP 333)
:return: a tuple with response_body, status
"""
response = {"status": 200,
"result": "success"}
status = '200 OK'
query_dict = urlparse.parse_qs(environ['QUERY_STRING'])
parameters = {'userid': str,
'token': str,
'jobid': int}
if not validate_parameters(query_dict, parameters):
response = {'status': 400,
'result': "invalid or missing parameter"}
return json.dumps(response), '400 Bad Request'
if random.random() > 0.9:
# give an error in 10% of the cases
response = {'status': 500,
'result': "Server Error"}
return json.dumps(response), '500 Server Error'
return json.dumps(response), status
def get_user_params(environ):
"""
Get user id and security token from CGI query string
:param environ: dictionary with environment variables (See PEP 333)
:return: tuple with userid, security_token
"""
query_dict = urlparse.parse_qs(environ['QUERY_STRING'])
if 'userid' not in query_dict or 'token' not in query_dict:
return '', ''
user_id = query_dict['userid']
token = query_dict['token']
return user_id, token
def validate_user(userid, token):
"""
Given an userid and security token, validate this against database
:param userid: string with user id
:param token: security token
:return: True if credentials are valid, false otherwise
"""
import random
if random.random() > 0.9:
# give an error in 10% of the cases
return False
return True
def get_current_jobs(environ):
"""
Get status for all jobs submitted by user in last week
TODO: placeholder for now
:param environ: dictionary with environment variables (See PEP 333)
:return: a tuple with response_body, status
"""
query_dict = urlparse.parse_qs(environ['QUERY_STRING'])
parameters = {'userid': str,
'token': str}
if not validate_parameters(query_dict, parameters):
response = {'status': 400,
'result': "invalid or missing parameter"}
return json.dumps(response), '400 Bad Request'
userid, secret = get_user_params(environ)
if not validate_user(userid, secret):
response = {'status': 401,
'result': "invalid user"}
return json.dumps(response), '401 Not Authorized'
response = {'status': 200,
'jobs': [{'id': 1,
'input': 'subj_1.mgz',
'name': 'job_name1',
'status': 'PROCESSING',
'output': 'http://test.url/output_1.mgz'},
{'id': 23,
'input': 'subj_182.mgz',
'name': 'my_job2',
'status': 'COMPLETED',
'output': 'http://test.url/output_182.mgz'}]}
status = '200 OK'
return json.dumps(response), status
def submit_job(environ):
"""
Submit a job to be processed
TODO: placeholder for now
:param environ: dictionary with environment variables (See PEP 333)
:return: a tuple with response_body, status
"""
query_dict = urlparse.parse_qs(environ['QUERY_STRING'])
parameters = {'userid': str,
'token': str,
'filename': str,
'singlecore': bool,
'jobname': str}
if not validate_parameters(query_dict, parameters):
response = {'status': 400,
'result': "invalid or missing parameter"}
return json.dumps(response), '400 Bad Request'
if random.random() > 0.9:
# give an error in 10% of the cases
response = {'status': 500,
'result': "Server Error"}
return json.dumps(response), '500 Server Error'
response = {"status": 200,
"result": "success"}
return json.dumps(response), '200 OK'
def application(environ, start_response):
"""
Get parameters from GET request and publish to redis channel
:param environ: dictionary with environment variables (See PEP 333)
:param start_response: callable function to handle responses (see PEP 333)
:return: a list with the response_body to return to client
"""
if 'REQUEST_METHOD' not in environ:
response_body = "No request method"
response_headers = [('Content-Type', 'text/html'),
('Content-Length', str(len(response_body)))]
start_response('200 OK', response_headers)
print response_body
return [response_body]
if environ['REQUEST_METHOD'] == 'GET':
response_body, status = get_current_jobs(environ)
elif environ['REQUEST_METHOD'] == 'POST':
response_body, status = submit_job(environ)
elif environ['REQUEST_METHOD'] == 'DELETE':
response_body, status = delete_job(environ)
else:
response_body = '500 Server Error'
status = '500 Server Error'
response_headers = [('Content-Type', 'text/html'),
('Content-Length', str(len(response_body)))]
start_response(status, response_headers)
print response_body
return [response_body]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parse request and act appropriately')
parser.add_argument('--host', dest='hostname', default=socket.getfqdn(),
help='hostname of server')
args = parser.parse_args(sys.argv[1:])
srv = make_server(args.hostname, 8080, application)
srv.serve_forever()
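# Rough usage sketch (illustrative host and values only), matching the handlers above:
#   list jobs:  curl "http://HOST:8080/?userid=u1&token=t1"
#   submit job: curl -X POST "http://HOST:8080/?userid=u1&token=t1&filename=subj_1.mgz&singlecore=True&jobname=job1"
#   delete job: curl -X DELETE "http://HOST:8080/?userid=u1&token=t1&jobid=1"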
|
StarcoderdataPython
|
1654138
|
<reponame>selectel/python-selvpcclient
import pytest
from tests.cli import make_client, run_cmd
from tests.util import answers
from tests.util import params
def test_show_theme_b64():
client = make_client(return_value=answers.CUSTOMIZATION_SHOW)
args = ['customization show', '--show-base64']
output = run_cmd(args, client, json_output=True)
assert output["color"] == "00ffee"
assert output["logo"] == params.LOGO_BASE64
assert output["brand_color"] == "00ffee"
def test_show_no_theme_b64():
client = make_client(return_value=answers.CUSTOMIZATION_NO_THEME)
args = ['customization show', '--show-base64']
output = run_cmd(args, client, json_output=True)
assert output["color"] == ""
assert output["logo"] == ""
assert output["brand_color"] == ""
def test_show_theme_b64_short():
client = make_client(return_value=answers.CUSTOMIZATION_SHOW)
args = ['customization show', '--show-short-base64']
output = run_cmd(args, client, json_output=True)
assert output["color"] == "00ffee"
assert output["logo"] == params.LOGO_BASE64_SHORTEN
assert output["brand_color"] == "00ffee"
def test_show_no_theme_b64_short():
client = make_client(return_value=answers.CUSTOMIZATION_NO_THEME)
args = ['customization show', '--show-short-base64']
output = run_cmd(args, client, json_output=True)
assert output["color"] == ""
assert output["logo"] == ""
assert output["brand_color"] == ""
def test_show_theme():
client = make_client(return_value=answers.CUSTOMIZATION_SHOW)
args = ['customization show']
output = run_cmd(args, client, json_output=True)
assert output["color"] == "00ffee"
assert output["logo"] is True
assert output["brand_color"] == "00ffee"
def test_show_no_theme():
client = make_client(return_value=answers.CUSTOMIZATION_NO_THEME)
args = ['customization show']
output = run_cmd(args, client, json_output=True)
assert output["color"] == ""
assert output["logo"] is False
assert output["brand_color"] == ""
def test_update_theme():
client = make_client(return_value=answers.CUSTOMIZATION_UPDATE)
args = ['customization update',
'--color', '00eeff',
'--brand-color', '00ffee']
output = run_cmd(args, client, json_output=True)
assert output["color"] == "00eeff"
assert output["brand_color"] == "00ffee"
def test_customization_delete_without_confirm_flag():
client = make_client(return_value={})
args = ['customization delete']
with pytest.raises(SystemExit):
run_cmd(args, client)
|
StarcoderdataPython
|
1711485
|
horas_mensais = float(input('Horas mensais de trabalho: '))
ganho_por_hora = float(input('Valor por hora trabalhada: '))
salario_bruto = (horas_mensais * ganho_por_hora)
inss = (8/100) * (salario_bruto)
imposto_de_renda = (11/100) * (salario_bruto)
sindicato = (5/100) * (salario_bruto)
descontos = (inss + imposto_de_renda + sindicato)
salario_liquido = (salario_bruto - descontos)
print(f'Salário Bruto: R$ {salario_bruto}')
print(f'INSS: R$ {inss}')
print(f'IR: R$ {imposto_de_renda}')
print(f'Sindicato: R$ {sindicato}')
print(f'Salário Líquido: R$ {salario_liquido}')
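# Worked example (illustrative numbers): 160 horas a R$ 20,00/hora
# -> salario_bruto = 3200.00, inss = 256.00, ir = 352.00, sindicato = 160.00,
#    descontos = 768.00, salario_liquido = 2432.00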
|
StarcoderdataPython
|
1629778
|
""" Cisco_IOS_XR_crypto_sam_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR crypto\-sam package operational data.
This module contains definitions
for the following management objects\:
sam\: Software authentication manager certificate information
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class CertificateIssuerEnum(Enum):
"""
CertificateIssuerEnum
Certificate issuers
.. data:: unknown = 0
Issuer is not known
.. data:: code_signing_server_certificate_authority = 1
Issuer is code signing server certificate
authority
"""
unknown = 0
code_signing_server_certificate_authority = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['CertificateIssuerEnum']
class LogCodeEnum(Enum):
"""
LogCodeEnum
Log code types
.. data:: unknown = 0
Log code is not known
.. data:: sam_server_restared_router_reboot = 1
Log code is SAM server restarted through router
reboot
.. data:: sam_server_restared = 2
Log code is SAM server restarted
.. data:: added_certificate_in_table = 3
Log code is Added certificate in table
.. data:: copied_certificate_in_table = 4
Log code is Copied certificate in table
.. data:: certificate_flag_changed = 5
Log code is Certificate flag changed
.. data:: validated_certificate = 6
Log code is validated certificate
.. data:: certificate_expired_detected = 7
Log code is Certificate expired detected
.. data:: certificate_revoked_detected = 8
Log code is Certificate revoked detected
.. data:: ca_certificate_expired_detected = 9
Log code is CA Certificate expired detected
.. data:: ca_certificate_revoked_detected = 10
Log code is CA Certificate revoked detected
.. data:: deleted_certificate_from_table = 11
Log code is Deleted certificate from table
.. data:: crl_added_updated_in_table = 12
Log code is CRL added/updated in table
.. data:: checked_memory_digest = 13
Log code is Checked memory digest
.. data:: nvram_digest_mismatch_detected = 14
Log code is NVRAM digest Mismatch detected
.. data:: insecure_backup_file_detected = 15
Log code is Insecure backup file detected
.. data:: error_restore_operation = 16
Log code is Error during restore operation,
backup file might have not been intact
.. data:: backup_file_on_nvram_deleted = 17
Log code is Found backup file on NVRAM for SAM
log had been deleted
.. data:: sam_log_file_recovered_from_system_database = 18
Log code is SAM log backup file recovered from
system database
.. data:: validated_elf = 19
Log code is validated ELF
.. data:: namespace_deleted_recovered_by_sam = 20
Log code is SAM system database name space
deleted/recovered by SAM
"""
unknown = 0
sam_server_restared_router_reboot = 1
sam_server_restared = 2
added_certificate_in_table = 3
copied_certificate_in_table = 4
certificate_flag_changed = 5
validated_certificate = 6
certificate_expired_detected = 7
certificate_revoked_detected = 8
ca_certificate_expired_detected = 9
ca_certificate_revoked_detected = 10
deleted_certificate_from_table = 11
crl_added_updated_in_table = 12
checked_memory_digest = 13
nvram_digest_mismatch_detected = 14
insecure_backup_file_detected = 15
error_restore_operation = 16
backup_file_on_nvram_deleted = 17
sam_log_file_recovered_from_system_database = 18
validated_elf = 19
namespace_deleted_recovered_by_sam = 20
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['LogCodeEnum']
class LogErrorEnum(Enum):
"""
LogErrorEnum
Log errors
.. data:: unknown = 0
Log error is not known
.. data:: log_message_error = 1
Log error is message error
.. data:: get_issuer_name_failed = 2
Log error is get issuer name failed
"""
unknown = 0
log_message_error = 1
get_issuer_name_failed = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['LogErrorEnum']
class LogTablesEnum(Enum):
"""
LogTablesEnum
Log tables
.. data:: unkown = 0
Table is not known
.. data:: memory_digest_table = 1
Table is memory digest table
.. data:: system_database_digest = 2
Table is system database digest table
.. data:: sam_tables = 3
Table is SAM table
"""
unkown = 0
memory_digest_table = 1
system_database_digest = 2
sam_tables = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['LogTablesEnum']
class Sam(object):
"""
Software authentication manager certificate
information
.. attribute:: certificate_revocation_list_summary
Certificate revocation list summary information
**type**\: :py:class:`CertificateRevocationListSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.CertificateRevocationListSummary>`
.. attribute:: certificate_revocations
Certificate revocation list index table information
**type**\: :py:class:`CertificateRevocations <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.CertificateRevocations>`
.. attribute:: devices
Certificate device table information
**type**\: :py:class:`Devices <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.Devices>`
.. attribute:: log_contents
SAM log content table information
**type**\: :py:class:`LogContents <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.LogContents>`
.. attribute:: packages
SAM certificate information package
**type**\: :py:class:`Packages <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.Packages>`
.. attribute:: system_information
SAM system information
**type**\: :py:class:`SystemInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.SystemInformation>`
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.certificate_revocation_list_summary = Sam.CertificateRevocationListSummary()
self.certificate_revocation_list_summary.parent = self
self.certificate_revocations = Sam.CertificateRevocations()
self.certificate_revocations.parent = self
self.devices = Sam.Devices()
self.devices.parent = self
self.log_contents = Sam.LogContents()
self.log_contents.parent = self
self.packages = Sam.Packages()
self.packages.parent = self
self.system_information = Sam.SystemInformation()
self.system_information.parent = self
class SystemInformation(object):
"""
SAM system information
.. attribute:: is_default_response
True if prompt default response is true
**type**\: bool
.. attribute:: is_running
True if SAM status information runs
**type**\: bool
.. attribute:: prompt_interval
Prompt interval at reboot time in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.is_default_response = None
self.is_running = None
self.prompt_interval = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:system-information'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.is_default_response is not None:
return True
if self.is_running is not None:
return True
if self.prompt_interval is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.SystemInformation']['meta_info']
class LogContents(object):
"""
SAM log content table information
.. attribute:: log_content
Number of lines for SAM log message
**type**\: list of :py:class:`LogContent <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.LogContents.LogContent>`
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.log_content = YList()
self.log_content.parent = self
self.log_content.name = 'log_content'
class LogContent(object):
"""
Number of lines for SAM log message
.. attribute:: number_of_lines <key>
Number of lines
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: entries_shown
Total entries shown
**type**\: int
**range:** 0..4294967295
.. attribute:: logs
SAM logs
**type**\: list of :py:class:`Logs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.LogContents.LogContent.Logs>`
.. attribute:: total_entries
Total log entries available
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.number_of_lines = None
self.entries_shown = None
self.logs = YList()
self.logs.parent = self
self.logs.name = 'logs'
self.total_entries = None
class Logs(object):
"""
SAM logs
.. attribute:: code
Log code
**type**\: :py:class:`LogCodeEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.LogCodeEnum>`
.. attribute:: error
Log error message
**type**\: :py:class:`LogErrorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.LogErrorEnum>`
.. attribute:: index
Device index
**type**\: int
**range:** 0..4294967295
.. attribute:: issuer
Issuer of the certificate
**type**\: :py:class:`CertificateIssuerEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.CertificateIssuerEnum>`
.. attribute:: sam_table_index
SAM table index
**type**\: int
**range:** 0..4294967295
.. attribute:: serial_no
Serial number
**type**\: str
.. attribute:: source_device
source device name
**type**\: str
.. attribute:: table
Log table information
**type**\: :py:class:`LogTablesEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.LogTablesEnum>`
.. attribute:: target_device
Target device
**type**\: str
.. attribute:: time
Log time
**type**\: str
.. attribute:: update_time
Last update time of the certificate
**type**\: str
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.code = None
self.error = None
self.index = None
self.issuer = None
self.sam_table_index = None
self.serial_no = None
self.source_device = None
self.table = None
self.target_device = None
self.time = None
self.update_time = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-crypto-sam-oper:logs'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.code is not None:
return True
if self.error is not None:
return True
if self.index is not None:
return True
if self.issuer is not None:
return True
if self.sam_table_index is not None:
return True
if self.serial_no is not None:
return True
if self.source_device is not None:
return True
if self.table is not None:
return True
if self.target_device is not None:
return True
if self.time is not None:
return True
if self.update_time is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.LogContents.LogContent.Logs']['meta_info']
@property
def _common_path(self):
if self.number_of_lines is None:
raise YPYModelError('Key property number_of_lines is None')
return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:log-contents/Cisco-IOS-XR-crypto-sam-oper:log-content[Cisco-IOS-XR-crypto-sam-oper:number-of-lines = ' + str(self.number_of_lines) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.number_of_lines is not None:
return True
if self.entries_shown is not None:
return True
if self.logs is not None:
for child_ref in self.logs:
if child_ref._has_data():
return True
if self.total_entries is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.LogContents.LogContent']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:log-contents'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.log_content is not None:
for child_ref in self.log_content:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.LogContents']['meta_info']
class Devices(object):
"""
Certificate device table information
.. attribute:: device
Certificate table device information
**type**\: list of :py:class:`Device <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.Devices.Device>`
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.device = YList()
self.device.parent = self
self.device.name = 'device'
class Device(object):
"""
Certificate table device information
.. attribute:: device_name <key>
Specify device name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: certificate
Certificate table information
**type**\: :py:class:`Certificate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.Devices.Device.Certificate>`
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.device_name = None
self.certificate = Sam.Devices.Device.Certificate()
self.certificate.parent = self
class Certificate(object):
"""
Certificate table information
.. attribute:: brief
Certificate table brief information
**type**\: :py:class:`Brief <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.Devices.Device.Certificate.Brief>`
.. attribute:: certificate_indexes
Certificate detail index table information
**type**\: :py:class:`CertificateIndexes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.Devices.Device.Certificate.CertificateIndexes>`
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.brief = Sam.Devices.Device.Certificate.Brief()
self.brief.parent = self
self.certificate_indexes = Sam.Devices.Device.Certificate.CertificateIndexes()
self.certificate_indexes.parent = self
class Brief(object):
"""
Certificate table brief information
.. attribute:: certificate_flags
Certificate flags
**type**\: :py:class:`CertificateFlags <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.Devices.Device.Certificate.Brief.CertificateFlags>`
.. attribute:: certificate_index
Certificate index
**type**\: int
**range:** 0..65535
.. attribute:: location
Certificate location
**type**\: str
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.certificate_flags = Sam.Devices.Device.Certificate.Brief.CertificateFlags()
self.certificate_flags.parent = self
self.certificate_index = None
self.location = None
class CertificateFlags(object):
"""
Certificate flags
.. attribute:: is_expired
Expired flag
**type**\: bool
.. attribute:: is_revoked
Revoked flag
**type**\: bool
.. attribute:: is_trusted
Trusted flag
**type**\: bool
.. attribute:: is_validated
Validated flag
**type**\: bool
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.is_expired = None
self.is_revoked = None
self.is_trusted = None
self.is_validated = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-crypto-sam-oper:certificate-flags'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.is_expired is not None:
return True
if self.is_revoked is not None:
return True
if self.is_trusted is not None:
return True
if self.is_validated is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.Devices.Device.Certificate.Brief.CertificateFlags']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-crypto-sam-oper:brief'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.certificate_flags is not None and self.certificate_flags._has_data():
return True
if self.certificate_index is not None:
return True
if self.location is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.Devices.Device.Certificate.Brief']['meta_info']
class CertificateIndexes(object):
"""
Certificate detail index table information
.. attribute:: certificate_index
Certificate detail index information
**type**\: list of :py:class:`CertificateIndex <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.Devices.Device.Certificate.CertificateIndexes.CertificateIndex>`
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.certificate_index = YList()
self.certificate_index.parent = self
self.certificate_index.name = 'certificate_index'
class CertificateIndex(object):
"""
Certificate detail index information
.. attribute:: index <key>
Specify certificate index
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: detail
Certificate table detail information
**type**\: :py:class:`Detail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.Devices.Device.Certificate.CertificateIndexes.CertificateIndex.Detail>`
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.index = None
self.detail = Sam.Devices.Device.Certificate.CertificateIndexes.CertificateIndex.Detail()
self.detail.parent = self
class Detail(object):
"""
Certificate table detail information
.. attribute:: certificate_flags
Certificate flags
**type**\: :py:class:`CertificateFlags <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.Devices.Device.Certificate.CertificateIndexes.CertificateIndex.Detail.CertificateFlags>`
.. attribute:: certificate_index
Certificate index
**type**\: int
**range:** 0..65535
.. attribute:: location
Certificate location
**type**\: str
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.certificate_flags = Sam.Devices.Device.Certificate.CertificateIndexes.CertificateIndex.Detail.CertificateFlags()
self.certificate_flags.parent = self
self.certificate_index = None
self.location = None
class CertificateFlags(object):
"""
Certificate flags
.. attribute:: is_expired
Expired flag
**type**\: bool
.. attribute:: is_revoked
Revoked flag
**type**\: bool
.. attribute:: is_trusted
Trusted flag
**type**\: bool
.. attribute:: is_validated
Validated flag
**type**\: bool
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.is_expired = None
self.is_revoked = None
self.is_trusted = None
self.is_validated = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-crypto-sam-oper:certificate-flags'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.is_expired is not None:
return True
if self.is_revoked is not None:
return True
if self.is_trusted is not None:
return True
if self.is_validated is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.Devices.Device.Certificate.CertificateIndexes.CertificateIndex.Detail.CertificateFlags']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-crypto-sam-oper:detail'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.certificate_flags is not None and self.certificate_flags._has_data():
return True
if self.certificate_index is not None:
return True
if self.location is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.Devices.Device.Certificate.CertificateIndexes.CertificateIndex.Detail']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.index is None:
raise YPYModelError('Key property index is None')
return self.parent._common_path +'/Cisco-IOS-XR-crypto-sam-oper:certificate-index[Cisco-IOS-XR-crypto-sam-oper:index = ' + str(self.index) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.index is not None:
return True
if self.detail is not None and self.detail._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.Devices.Device.Certificate.CertificateIndexes.CertificateIndex']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-crypto-sam-oper:certificate-indexes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.certificate_index is not None:
for child_ref in self.certificate_index:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.Devices.Device.Certificate.CertificateIndexes']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-crypto-sam-oper:certificate'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.brief is not None and self.brief._has_data():
return True
if self.certificate_indexes is not None and self.certificate_indexes._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.Devices.Device.Certificate']['meta_info']
@property
def _common_path(self):
if self.device_name is None:
raise YPYModelError('Key property device_name is None')
return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:devices/Cisco-IOS-XR-crypto-sam-oper:device[Cisco-IOS-XR-crypto-sam-oper:device-name = ' + str(self.device_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.device_name is not None:
return True
if self.certificate is not None and self.certificate._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.Devices.Device']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:devices'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.device is not None:
for child_ref in self.device:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.Devices']['meta_info']
class Packages(object):
"""
SAM certificate information package
.. attribute:: package
SAM certificate information for a specific package
**type**\: list of :py:class:`Package <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.Packages.Package>`
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.package = YList()
self.package.parent = self
self.package.name = 'package'
class Package(object):
"""
SAM certificate information for a specific
package
.. attribute:: package_name <key>
Specify package name
**type**\: str
.. attribute:: certificate_flags
Certificate flags
**type**\: :py:class:`CertificateFlags <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.Packages.Package.CertificateFlags>`
.. attribute:: certificate_index
Certificate index
**type**\: int
**range:** 0..65535
.. attribute:: location
Certificate location
**type**\: str
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.package_name = None
self.certificate_flags = Sam.Packages.Package.CertificateFlags()
self.certificate_flags.parent = self
self.certificate_index = None
self.location = None
class CertificateFlags(object):
"""
Certificate flags
.. attribute:: is_expired
Expired flag
**type**\: bool
.. attribute:: is_revoked
Revoked flag
**type**\: bool
.. attribute:: is_trusted
Trusted flag
**type**\: bool
.. attribute:: is_validated
Validated flag
**type**\: bool
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.is_expired = None
self.is_revoked = None
self.is_trusted = None
self.is_validated = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-crypto-sam-oper:certificate-flags'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.is_expired is not None:
return True
if self.is_revoked is not None:
return True
if self.is_trusted is not None:
return True
if self.is_validated is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.Packages.Package.CertificateFlags']['meta_info']
@property
def _common_path(self):
if self.package_name is None:
raise YPYModelError('Key property package_name is None')
return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:packages/Cisco-IOS-XR-crypto-sam-oper:package[Cisco-IOS-XR-crypto-sam-oper:package-name = ' + str(self.package_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.package_name is not None:
return True
if self.certificate_flags is not None and self.certificate_flags._has_data():
return True
if self.certificate_index is not None:
return True
if self.location is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.Packages.Package']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:packages'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.package is not None:
for child_ref in self.package:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.Packages']['meta_info']
class CertificateRevocations(object):
"""
Certificate revocation list index table
information
.. attribute:: certificate_revocation
Certificate revocation list index information
**type**\: list of :py:class:`CertificateRevocation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.CertificateRevocations.CertificateRevocation>`
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.certificate_revocation = YList()
self.certificate_revocation.parent = self
self.certificate_revocation.name = 'certificate_revocation'
class CertificateRevocation(object):
"""
Certificate revocation list index information
.. attribute:: crl_index <key>
CRL index
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: certificate_revocation_list_detail
Certificate revocation list detail information
**type**\: :py:class:`CertificateRevocationListDetail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.CertificateRevocations.CertificateRevocation.CertificateRevocationListDetail>`
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.crl_index = None
self.certificate_revocation_list_detail = Sam.CertificateRevocations.CertificateRevocation.CertificateRevocationListDetail()
self.certificate_revocation_list_detail.parent = self
class CertificateRevocationListDetail(object):
"""
Certificate revocation list detail information
.. attribute:: crl_index
CRL index
**type**\: int
**range:** 0..65535
.. attribute:: issuer
Issuer name
**type**\: :py:class:`Issuer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.CertificateRevocations.CertificateRevocation.CertificateRevocationListDetail.Issuer>`
.. attribute:: updates
Updated time of CRL is displayed
**type**\: str
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.crl_index = None
self.issuer = Sam.CertificateRevocations.CertificateRevocation.CertificateRevocationListDetail.Issuer()
self.issuer.parent = self
self.updates = None
class Issuer(object):
"""
Issuer name
.. attribute:: common_name
Common name
**type**\: str
.. attribute:: country
Country
**type**\: str
.. attribute:: organization
Organization
**type**\: str
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.common_name = None
self.country = None
self.organization = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-crypto-sam-oper:issuer'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.common_name is not None:
return True
if self.country is not None:
return True
if self.organization is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.CertificateRevocations.CertificateRevocation.CertificateRevocationListDetail.Issuer']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-crypto-sam-oper:certificate-revocation-list-detail'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.crl_index is not None:
return True
if self.issuer is not None and self.issuer._has_data():
return True
if self.updates is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.CertificateRevocations.CertificateRevocation.CertificateRevocationListDetail']['meta_info']
@property
def _common_path(self):
if self.crl_index is None:
raise YPYModelError('Key property crl_index is None')
return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:certificate-revocations/Cisco-IOS-XR-crypto-sam-oper:certificate-revocation[Cisco-IOS-XR-crypto-sam-oper:crl-index = ' + str(self.crl_index) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.crl_index is not None:
return True
if self.certificate_revocation_list_detail is not None and self.certificate_revocation_list_detail._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.CertificateRevocations.CertificateRevocation']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:certificate-revocations'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.certificate_revocation is not None:
for child_ref in self.certificate_revocation:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.CertificateRevocations']['meta_info']
class CertificateRevocationListSummary(object):
"""
Certificate revocation list summary information
.. attribute:: crl_index
CRL index
**type**\: int
**range:** 0..65535
.. attribute:: issuer
Issuer name
**type**\: :py:class:`Issuer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.CertificateRevocationListSummary.Issuer>`
.. attribute:: updates
Updated time of CRL is displayed
**type**\: str
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.crl_index = None
self.issuer = Sam.CertificateRevocationListSummary.Issuer()
self.issuer.parent = self
self.updates = None
class Issuer(object):
"""
Issuer name
.. attribute:: common_name
Common name
**type**\: str
.. attribute:: country
Country
**type**\: str
.. attribute:: organization
Organization
**type**\: str
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.common_name = None
self.country = None
self.organization = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:certificate-revocation-list-summary/Cisco-IOS-XR-crypto-sam-oper:issuer'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.common_name is not None:
return True
if self.country is not None:
return True
if self.organization is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.CertificateRevocationListSummary.Issuer']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:certificate-revocation-list-summary'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.crl_index is not None:
return True
if self.issuer is not None and self.issuer._has_data():
return True
if self.updates is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.CertificateRevocationListSummary']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-oper:sam'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.certificate_revocation_list_summary is not None and self.certificate_revocation_list_summary._has_data():
return True
if self.certificate_revocations is not None and self.certificate_revocations._has_data():
return True
if self.devices is not None and self.devices._has_data():
return True
if self.log_contents is not None and self.log_contents._has_data():
return True
if self.packages is not None and self.packages._has_data():
return True
if self.system_information is not None and self.system_information._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam']['meta_info']
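# Usage sketch (not part of the generated bindings): this operational model is
# normally read from a device through ydk's CRUD service over NETCONF. The
# address, credentials and exact provider arguments below are illustrative
# assumptions, not values taken from this module.
if __name__ == '__main__':
    from ydk.services import CRUDService
    from ydk.providers import NetconfServiceProvider
    provider = NetconfServiceProvider(address='192.0.2.1', port=830,
                                      username='admin', password='admin',
                                      protocol='ssh')
    crud = CRUDService()
    sam_filter = Sam()                          # an empty filter fetches the whole subtree
    sam_oper = crud.read(provider, sam_filter)  # operational data, read-only
    print(sam_oper.system_information.is_running)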
|
StarcoderdataPython
|
26124
|
<reponame>lamenezes/certificator<filename>certificator/meetup/__init__.py
from datetime import datetime as dt
from .client import MeetupClient
from ..certificator import BaseCertificator
from .models import Event
class MeetupCertificator(BaseCertificator):
def __init__(self, urlname, event_id, api_key, **kwargs):
super().__init__(**kwargs)
self.urlname = urlname
self.event_id = event_id
self.client = MeetupClient(api_key=api_key)
@property
def certificate_data(self):
attendances = self.client.get_attendances(self.urlname, self.event_id)
return ({'name': attendance['member']['name']} for attendance in attendances)
@property
def meta(self):
event_data = self.client.get_event(self.urlname, self.event_id)
event = Event(**event_data)
event.clean()
return {
'city': event.venue['city'],
'date': dt.strftime(event.date, '%d/%m/%Y'),
'full_date': event.full_date,
'organizer': event.group['name'],
'place': event.venue['name'],
'title': event.name,
'workload': event.duration,
}
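# Usage sketch (illustrative values only; it also assumes BaseCertificator
# needs no extra keyword arguments):
#   certificator = MeetupCertificator(urlname='my-meetup-group',
#                                     event_id='123456789',
#                                     api_key='MY_MEETUP_API_KEY')
#   print(certificator.meta)  # event title, date, venue, organizer, workload
#   names = [entry['name'] for entry in certificator.certificate_data]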
|
StarcoderdataPython
|
1718214
|
# Copyright 2018 Samsung Electronics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class TroveBaseProperties(object):
ID = 'id'
NAME = 'name'
UPDATE_TIMESTAMP = 'updated'
PROJECT_ID = 'tenant_id'
class TroveInstanceProperties(TroveBaseProperties):
STATE = 'status'
SERVER_ID = 'server_id'
class TroveClusterProperties(TroveBaseProperties):
STATE = ('task', 'name')
INSTANCES = 'instances'
|
StarcoderdataPython
|
3350440
|
<filename>slackbot/plugins/secrefs.py
__author__ = 'jayk'
import re
from slackbot.bot import respond_to
from slackbot.bot import listen_to
@respond_to('secbot', re.IGNORECASE)
def hello_reply(message):
message.reply('hello human!')
@respond_to('what is (.*)')
def whatis(message, something):
message.reply('Here is {}'.format(something))
|
StarcoderdataPython
|
3300704
|
# pylint: skip-file
from data import mnist_iterator
import mxnet as mx
import numpy as np
import logging
# define mlp
use_torch_criterion = False
data = mx.symbol.Variable('data')
fc1 = mx.symbol.TorchModule(data_0=data, lua_string='nn.Linear(784, 128)', num_data=1, num_params=2, num_outputs=1, name='fc1')
act1 = mx.symbol.TorchModule(data_0=fc1, lua_string='nn.ReLU(false)', num_data=1, num_params=0, num_outputs=1, name='relu1')
fc2 = mx.symbol.TorchModule(data_0=act1, lua_string='nn.Linear(128, 64)', num_data=1, num_params=2, num_outputs=1, name='fc2')
act2 = mx.symbol.TorchModule(data_0=fc2, lua_string='nn.ReLU(false)', num_data=1, num_params=0, num_outputs=1, name='relu2')
fc3 = mx.symbol.TorchModule(data_0=act2, lua_string='nn.Linear(64, 10)', num_data=1, num_params=2, num_outputs=1, name='fc3')
if use_torch_criterion:
logsoftmax = mx.symbol.TorchModule(data_0=fc3, lua_string='nn.LogSoftMax()', num_data=1, num_params=0, num_outputs=1, name='logsoftmax')
# Torch's label starts from 1
label = mx.symbol.Variable('softmax_label') + 1
mlp = mx.symbol.TorchCriterion(data=logsoftmax, label=label, lua_string='nn.ClassNLLCriterion()', name='softmax')
else:
mlp = mx.symbol.SoftmaxOutput(data=fc3, name='softmax')
# data
train, val = mnist_iterator(batch_size=100, input_shape = (784,))
# train
logging.basicConfig(level=logging.DEBUG)
model = mx.model.FeedForward(
ctx = mx.cpu(0), symbol = mlp, num_epoch = 20,
learning_rate = 0.1, momentum = 0.9, wd = 0.00001)
if use_torch_criterion:
model.fit(X=train, eval_data=val, eval_metric=mx.metric.Torch())
else:
model.fit(X=train, eval_data=val)
|
StarcoderdataPython
|
1721582
|
# Generated with LimitStateCategory
#
from enum import Enum
from enum import auto
class LimitStateCategory(Enum):
"""Limit state categories."""
SLS = auto()
ULS = auto()
ALS = auto()
def label(self):
if self == LimitStateCategory.SLS:
return "SLS"
if self == LimitStateCategory.ULS:
return "ULS"
if self == LimitStateCategory.ALS:
return "ALS"
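# Minimal usage sketch: iterate the enum and show the name/label round trip.
if __name__ == '__main__':
    for state in LimitStateCategory:
        print(state.name, state.value, state.label())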
|
StarcoderdataPython
|
3217708
|
# -*- coding: utf-8 -*-
"""
@author: anonymous
"""
import json
train1 = {'simulation':{
'num_simulations' : 10,
'simulation_index_start' : 0,
'N' : 10,
'K' : 20,
'R_defined' : 400,
'min_dist' : 35,
'dcor' : 10,
'T' : 20e-3,
'total_samples' : 100000,
'isTrain' : True,
'equal_number_for_BS' : True},
'train_episodes':{
'T_train' : 5000,
'T_sleep' : 5000,
'cell_passing_training' : True,
'cell_passing_sleeping' : True,
'T_register' : 50},
'mobility_params':{
'v_c' : 3e8,
'f_c' : 1e9,
'v_max' : 5.0,
'a_max' : 0.5,
'alpha_angle_rad' : 0.175,
'T_mobility' : 50,
'max_doppler' : None},
'dqn':{
'N_neighbors' : 5,
'scale_R_inner' : 0.75,
'scale_R_interf' : 2.5,
'n_hiddens' : [200,100,40],
'neightresh' : 5,
'discount_factors' : [0.5],
'learning_rate_0' : 0.01,
'learning_rate_decay' : 1-1E-4,
'learning_rate_min' : 0.0,
'target_update_count' : 100,
'time_slot_to_pass_weights' : 50,
'max_epsilon' : 0.25,
'epsilon_decay' : 1-1E-4,
'min_epsilon' : 0.01,
'batch_size' : 256,
'memory_per_agent' : 1000}}
print(json.dumps(train1,indent=4))
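# Optional sketch: the script only prints the configuration; persisting it
# would look like the following (the filename is an assumption).
# with open('train1.json', 'w') as f:
#     json.dump(train1, f, indent=4)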
|
StarcoderdataPython
|
91467
|
import tkinter as tk
from tkinter import ttk
from collections import deque
class Timer(ttk.Frame):
"""parent is the frame that contains this timer frame, controller is the app
object whose shared settings (pomodoro times, timer schedule) are used here,
and ttk.Frame behaviour is inherited via super()."""
def __init__(self, parent, controller, show_settings):
super().__init__(parent)
self['style'] = 'Background.TFrame'
# setting the object as the controller
self.controller = controller
pomodoro_time = int(controller.pomodoro.get())
# variable to hold the current time with default value
self.current_time = tk.StringVar(value=f'{pomodoro_time:02d}:00')
# variable to hold the current phase of the timer_Schedule
self.current_timer_label = tk.StringVar(value=controller.timer_schedule[0])
# timer_running variable with boolean value false as timer is initially off
# it will start after clicking start button
self.timer_running = False
# private variable to stop the execution of after method in decrement method
self._timer_decrement_job = None
# label showing the current phase
timer_description = ttk.Label(
self,
textvariable=self.current_timer_label,
style='LightText.TLabel'
)
timer_description.grid(row=0, column=0, sticky='W', padx=(10, 0), pady=(10, 0))
# button to switch frame from timer to settings frame
settings_button = ttk.Button(
self,
text='Settings',
command=show_settings,
style='PomodoroButton.TButton',
cursor='hand2'
)
settings_button.grid(row=0, column=1, sticky='E', padx=10, pady=10)
timer_frame = ttk.Frame(self, height='100', style='Timer.TFrame')
timer_frame.grid(row=1, column=0, columnspan=2, pady=(10, 0), sticky='NSEW')
# counter label in timer_frame
timer_counter = ttk.Label(timer_frame,
textvariable=self.current_time,
style='TimerText.TLabel',
)
timer_counter.place(relx=0.5, rely=0.5, anchor='center') # positioning method like grid
# Button containing frame
button_container = ttk.Frame(self, padding=100, style='Background.TFrame')
button_container.grid(row=2, column=0, columnspan=2, sticky='EW')
button_container.columnconfigure((0, 1, 2), weight=1)
self.start_button = ttk.Button(
button_container,
text='Start',
command=self.start_timer,
style='PomodoroButton.TButton',
cursor='hand2' # change the appearance of cursor on the button
)
self.start_button.grid(row=0, column=0, sticky='EW')
self.stop_button = ttk.Button(
button_container,
text='Stop',
state='disabled', # initially off
command=self.stop_timer,
style='PomodoroButton.TButton',
cursor='hand2'
)
self.stop_button.grid(row=0, column=1, sticky='EW', padx=5)
"""self not used with reset_button and reset_timer because we don't want to use them outside of this class"""
reset_button = ttk.Button(
button_container,
text='Reset',
command=self.reset_timer,
style='PomodoroButton.TButton',
cursor='hand2'
)
reset_button.grid(row=0, column=2, sticky='EW')
def start_timer(self):
self.timer_running = True # setting the timer status on after clicking start
self.start_button['state'] = 'disabled' # disables the start button after start of timer
self.stop_button['state'] = 'enabled'  # enable the stop button after the timer starts (it was initially disabled)
self.decrement_time()
def stop_timer(self):
self.timer_running = False # on click of stop ,off the timer
self.stop_button['state'] = 'disabled' # disables the stop button after the click
self.start_button['state'] = 'enabled'  # enables the start button again after the timer is stopped
if self._timer_decrement_job: # when the _timer_decrement_job found
self.after_cancel(self._timer_decrement_job) # cancel the further execution
self._timer_decrement_job = None # set the value of the _timer_decrement_job to None
def reset_timer(self):
self.stop_timer()
pomodoro_time = int(self.controller.pomodoro.get()) # getting value of pomodoro time from pomodoro class
self.current_time.set(f'{pomodoro_time:02d}:00') # set the current time to 25 after click of button
self.controller.timer_schedule = deque(self.controller.timer_order) # change timer schedule to initial state
self.current_timer_label.set(self.controller.timer_schedule[0]) # update timer label with first value of queue
def decrement_time(self):
"""This function reducing or updating the label every second"""
current_time = self.current_time.get()
if self.timer_running and current_time != '00:00': # timer is running
minutes, seconds = current_time.split(':') # splitting the string values into two variables
if int(seconds) > 0: # never let seconds be negative
seconds = int(seconds)-1
minutes = int(minutes)
else: # sets the timer to max after reaching zero
seconds = 59
minutes = int(minutes)-1
# setting the label value
self.current_time.set(f'{minutes:02d}:{seconds:02d}')
# calling the decrement function repeatedly after a second
self._timer_decrement_job = self.after(1000, self.decrement_time)
elif self.timer_running and current_time == '00:00':
self.controller.timer_schedule.rotate(-1)  # rotate the schedule left: the finished phase moves to the end
next_up = self.controller.timer_schedule[0]  # the next phase is now at the front of the queue
# variable constantly updating the phase of scheduler after each phase changes
self.current_timer_label.set(next_up)
# checking which element is now at first position in task_order
# setting the current time accordingly
if next_up == 'Pomodoro':
pomodoro_time = int(self.controller.pomodoro.get())
self.current_time.set(f'{pomodoro_time:02d}:00')
elif next_up == 'Short Break':
short_break_time = int(self.controller.short_break.get())
self.current_time.set(f'{short_break_time:02d}:00')
elif next_up == 'Long Break':
long_break_time = int(self.controller.long_break.get())
self.current_time.set(f'{long_break_time:02d}:00')
self._timer_decrement_job = self.after(1000, self.decrement_time)
|
StarcoderdataPython
|
1608727
|
from .Feed import *
from .FPFeed import *
|
StarcoderdataPython
|
14635
|
<reponame>ezeakeal/glider_drone
from unittest import TestCase
from glider.modules.glider_radio import GliderRadio
class TestGliderRadio(TestCase):
def setUp(self):
self.radio = GliderRadio(self.radio_callback)
self.radio.start()
def tearDown(self):
self.radio.stop()
def radio_callback(self, msgdict):  # callback handed to GliderRadio; not itself a test case
print("Received message: %s" % msgdict)
def test_send_data(self):
self.radio.send_data(["test"])
|
StarcoderdataPython
|
1619666
|
<reponame>vishalbelsare/emmental-tutorials<gh_stars>10-100
import logging
import torchvision
from eda.image.datasets import ALL_DATASETS
from emmental.data import EmmentalDataLoader
logger = logging.getLogger(__name__)
def get_dataloaders(args):
train_dataset = torchvision.datasets.__dict__[args.task.upper()](
root=args.data, train=True, download=True
)
test_dataset = torchvision.datasets.__dict__[args.task.upper()](
root=args.data, train=False, download=True
)
dataloaders = []
datasets = {}
for split in ["train", "test"]:
if split == "train":
datasets[split] = ALL_DATASETS[args.task](
args.task,
train_dataset,
split,
index=None,
prob_label=True,
k=args.augment_k,
)
elif split == "test":
datasets[split] = ALL_DATASETS[args.task](args.task, test_dataset, split)
for split, dataset in datasets.items():
dataloaders.append(
EmmentalDataLoader(
task_to_label_dict={args.task: "labels"},
dataset=dataset,
split=split,
shuffle=True if split in ["train"] else False,
batch_size=args.batch_size
if split in args.train_split or args.valid_batch_size is None
else args.valid_batch_size,
num_workers=4,
)
)
logger.info(
f"Built dataloader for {args.task} {split} set with {len(dataset)} "
f"samples (Shuffle={split in args.train_split}, "
f"Batch size={dataloaders[-1].batch_size})."
)
return dataloaders
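# Usage sketch: get_dataloaders expects an argparse-style namespace; the field
# values below (including the 'mnist' task key) are illustrative assumptions,
# not defaults taken from this project.
if __name__ == '__main__':
    from types import SimpleNamespace
    args = SimpleNamespace(task='mnist', data='./data', augment_k=2,
                           batch_size=32, valid_batch_size=None,
                           train_split=['train'])
    loaders = get_dataloaders(args)
    print(f"Built {len(loaders)} dataloaders")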
|
StarcoderdataPython
|
4837816
|
# -*- coding: utf-8 -*-
from gevent import monkey
monkey.patch_all()
import gevent
import gevent.pool
import time
import random
import logging
_handler = logging.StreamHandler()
_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(module)s:%(lineno)s %(message)s'))
log = logging.getLogger('TEST')
log.addHandler(_handler)
log.setLevel(logging.DEBUG)
def PrintNumber(num):
log.info('num: {}'.format(num))
def main():
pool = gevent.pool.Pool(100)
index = 0
__T_Time = time.time()
while True:
index += 1
__T_elapsed = time.time() - __T_Time
# Calculate the remaining time to wait before spawning the next task
__T_leftToWait = GLB_minInterval - __T_elapsed
if __T_leftToWait:
gevent.sleep(__T_leftToWait)
pool.spawn(PrintNumber, index)
__T_Time = time.time()
pool.join()
def init():
global GLB_maxSpeed, GLB_minInterval
GLB_maxSpeed = random.randint(1, 10)
GLB_minInterval = 1.00 / float(GLB_maxSpeed)
log.info("Sync speed: {}, Interval: {}".format(GLB_maxSpeed, GLB_minInterval))
def speed():
while True:
init()
gevent.sleep(30)
if __name__ == '__main__':
x = [x for x in range(2000000)]
init()
gevent.spawn(speed)
main()
|
StarcoderdataPython
|
3374956
|
<filename>scripts/check_literalincludes.py
import sys
import subprocess
def literalinclude_blocks():
literalincludes = subprocess.check_output(
["git", "grep", "-A", "10", ".. literalinclude::"]
)
literalincludes = literalincludes.decode()
section = []
for line in "\n".join(literalincludes.split("\n--\n")).split("\n"):
if not line.strip():
continue
# If literalinclude is in the output git grep will separate with :
# instead of -
if ".. literalinclude::" in line:
line = line.split(":", maxsplit=1)
else:
line = line.split("-", maxsplit=1)
# For blank lines
if section and (len(line) != 2 or not line[1]):
yield section
section = []
continue
contains_literalinclude, filecontents = line
if ".. literalinclude::" in filecontents:
section = [contains_literalinclude, [filecontents]]
elif section:
section[1].append(filecontents)
def main():
# Map filenames that might have changed to the file which references it's
# line numbers
check_changed = {}
for contains_literalinclude, lines in literalinclude_blocks():
# Skip blocks that don't reference specific lines
if not ":lines:" in "\n".join(lines):
continue
# Grab the file being referenced
# Remove /../ used by sphinx docs to reference files outside docs dir
referenced = lines[0].split()[-1].replace("/../", "", 1)
check_changed.setdefault(referenced, {})
check_changed[referenced].setdefault(contains_literalinclude, False)
# Get the list of changed files
changed_files = subprocess.check_output(
["git", "diff-index", "origin/main"]
)
changed_files = changed_files.decode()
changed_files = list(
map(
lambda line: line.split()[-1],
filter(bool, changed_files.split("\n")),
)
)
rm_paths = []
for filepath in check_changed:
if not filepath in changed_files:
rm_paths.append(filepath)
for filepath in rm_paths:
del check_changed[filepath]
for filepath in check_changed:
if filepath in changed_files:
for has_been_updated in check_changed[filepath]:
if has_been_updated in changed_files:
check_changed[filepath][has_been_updated] = True
# Fail if any referenced file changed without its referencing doc also changing
fail = False
for referenced_changed, should_have_changed in check_changed.items():
for filepath, was_changed in should_have_changed.items():
if not was_changed:
fail = True
print(
f"{filepath!r} might need updating as line numbers of "
+ f"{referenced_changed!r} may have changed"
)
if not fail:
return
print(
"This script checks to see if any .py files changed that are "
"referenced in .rst files using specific line numbers. This script "
"is failing because those rst files were not also modified. "
"This script is not smart enough to tell if those lines should be "
"modified, or that you modified them correctly. Just as an extra "
"sanity check."
)
print()
print(
"You'll notice that the literalinclude blocks might end up with the "
"text in them being off because the :lines: wasn't changed. That's "
"what this script is hoping to catch."
)
sys.exit(1)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3272410
|
from setuptools import setup, find_packages
with open("README.rst", "r") as fh:
long_description = fh.read()
setup_params = dict(
name = 'pysqream',
version = '3.0.0',
description = 'DB-API connector for SQreamDB',
long_description=long_description,
url="https://github.com/SQream/pysqream",
author="SQream Technologies",
author_email="<EMAIL>",
packages = ['pysqream'],
classifiers=[
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
],
keywords='database db-api sqream sqreamdb',
)

# Disabled SQLAlchemy dialect wiring, kept for reference:
# install_requires=['sqlalchemy'],
# package_dir = {'': 'pysqream'},
# entry_points={
#     'sqlalchemy.dialects':
#         ['sqream = pysqream.dialect:SqreamDialect']
# },
# sqream://sqream:sqream@localhost/master
# sqream+sqream_dialect://sqream:sqream@localhost/master
if __name__ == '__main__':
setup(**setup_params)
|
StarcoderdataPython
|
188975
|
# -*- coding: utf-8 -*-
"""lhs_opt.py: Module to generate design matrix from an optimized
Latin Hypercube design
"""
import numpy as np
from . import lhs
__author__ = "<NAME>"
def create_ese(n: int, d: int, seed: int, max_outer: int,
obj_function: str="w2_discrepancy",
threshold_init: float=0,
num_exchanges: int=0,
max_inner: int = 0,
improving_params: list = [0.1, 0.8],
exploring_params: list = [0.1, 0.8, 0.9, 0.7]) -> np.ndarray:
"""Generate an optimized LHS using Enhanced Stochastic Evolutionary Alg.
The default parameters of the optimization can be overridden, if necessary.
:param n: the number of samples
:param d: the number of dimension
:param seed: the random seed number
:param max_outer: the maximum number of outer iterations
:param obj_function: the objective function to optimize
:param threshold_init: the initial threshold
:param num_exchanges: the number of candidates in perturbation step
:param max_inner: the maximum number of inner iterations
:param improving_params: the 2 parameters used in improve process
(a) the cut-off value to decrease the threshold
(b) the multiplier to decrease or increase the threshold
:param exploring_params: the 4 parameters used in explore process
(a) the cut-off value of acceptance, start increasing the threshold
(b) the cut-off value of acceptance, start decreasing the threshold
(c) the cooling multiplier for the threshold
(d) the warming multiplier for the threshold
"""
from .opt_alg.stochastic_evolutionary import optimize
# If dimension is less than 2, abort optimization
if d < 2:
raise ValueError("Dimension less than 2, optimization irrelevant!")
if seed is not None:
np.random.seed(seed)
# Create initial LHD sample
dm = lhs.create(n, d, seed=seed)
# Optimize the LHD sample
dm_opt = optimize(dm, obj_function, threshold_init, num_exchanges,
max_inner, max_outer, improving_params, exploring_params)
return dm_opt.dm_best
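# Usage sketch: a small optimized design with the default objective function;
# the sample size, dimension and iteration budget below are illustrative only.
if __name__ == '__main__':
    dm = create_ese(n=20, d=3, seed=42, max_outer=10)
    print(dm.shape)  # expected (20, 3): 20 samples in 3 dimensions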
|
StarcoderdataPython
|
3241779
|
<reponame>alvin-chang/lightning-flash
from flash.text.seq2seq.translation.data import TranslationData
from flash.text.seq2seq.translation.model import TranslationTask
|
StarcoderdataPython
|
3297156
|
import os
import argparse
import traceback
from datetime import datetime
import pandas as pd
from cmapPy.pandasGEXpress.GCToo import GCToo
from cmapPy.set_io.grp import read as parse_grp
from cmapPy.pandasGEXpress.write_gctx import write as write_gctx
from cmapPy.pandasGEXpress.write_gct import write as write_gct
def parse_condition(arg, sep=","):
"""
    Parse an argument that may be a pathname, string, or list. If the path points to an existing file, it is read as a GRP or TXT file.
    Non-path strings are tokenized by the specified delimiter, ',' by default.
    Returns a list.
:param arg: Takes in pathname, string, or list.
:param sep: Delimiter to separate elements in string into list. Default is ','
:return: list
"""
if isinstance(arg, str):
if os.path.isfile(arg):
arg = parse_grp(arg)
else:
arg = arg.split(sep=sep)
return list(arg)
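# Usage sketch (hypothetical inputs; assumes "x,y,z" is not an existing file path):
#     parse_condition("x,y,z")    -> ['x', 'y', 'z']
#     parse_condition(["x", "y"]) -> ['x', 'y']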
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def write_args(args, out_path):
options = vars(args)
with open(os.path.join(out_path, "config.txt"), "w+") as f:
for option in options:
f.write("{}: {}\n".format(option, options[option]))
print("{}: {}".format(option, options[option]))
def write_status(success, out, exception=""):
if success:
print("SUCCESS: Output written to {}".format(out))
with open(os.path.join(out, "SUCCESS.txt"), "w") as file:
file.write("Finished on {}\n".format(datetime.now().strftime("%c")))
else:
print("FAILED: Stack traced saved to {}".format(out))
with open(os.path.join(out, "FAILURE.txt"), "w") as file:
file.write(str(exception))
file.write(traceback.format_exc())
def mk_out_dir(path, toolname, create_subdir=True):
path = os.path.abspath(path)
if not os.path.exists(path):
os.mkdir(path)
if create_subdir:
timestamp = datetime.now().strftime("_%Y%m%d%H%M%S")
out_name = "".join([toolname, timestamp])
out_path = os.path.join(path, out_name)
os.mkdir(out_path)
return out_path
else:
return path
def long_to_gctx(df):
"""
    Converts a long-format CSV table to a GCToo object. The dataframe must have 'rid', 'cid' and 'value' columns.
    No other columns or metadata are preserved.
:param df: Long form pandas DataFrame
:return: GCToo object
"""
df = df[["rid", "cid", "value"]].pivot(index="rid", columns="cid", values="value")
gct = GCToo(df)
# Ensure index is string
gct.row_metadata_df.index = gct.row_metadata_df.index.astype("str")
gct.data_df.index = gct.data_df.index.astype("str")
return gct
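# Example sketch (hypothetical data): a long table with rows
#     rid  cid  value
#     g1   s1   1.0
#     g1   s2   2.0
# becomes a GCToo object whose data_df has index ['g1'] and columns ['s1', 's2'].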
def csv_to_gctx(filepaths, outpath, use_gctx=True):
"""
    Convert a list of CSV files to GCTX. The CSVs must have 'rid', 'cid' and 'value' columns.
    No other columns or metadata are preserved.
:param filepaths: List of paths to CSVs
:param outpath: output directory of file
:param use_gctx: use GCTX HDF5 format. Default is True
:return:
"""
li = []
for filename in filepaths:
df = pd.read_csv(filename, index_col=None, header=0)
li.append(df)
result = pd.concat(li, axis=0, ignore_index=True)
df = result[["rid", "cid", "value"]].pivot(
index="rid", columns="cid", values="value"
)
gct = GCToo(df)
if use_gctx:
ofile = os.path.join(outpath, "result.gctx")
write_gctx(gct, ofile)
else:
ofile = os.path.join(outpath, "result.gct")
write_gct(gct, ofile)
return ofile
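# Usage sketch (hypothetical paths; with the default use_gctx=True an HDF5 file is written):
#     csv_to_gctx(["plate1.csv", "plate2.csv"], "/tmp/out")  # -> '/tmp/out/result.gctx'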
|
StarcoderdataPython
|
1767901
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import random
import uuid
from odoo import api, fields, models, _
from odoo.tools import email_split, email_split_and_format
class Order(models.Model):
_name = 'plant.order'
_description = 'Plant Order'
_order = 'id DESC'
_inherit = ['mail.thread', 'mail.activity.mixin', 'rating.mixin', 'utm.mixin', 'portal.mixin']
def _get_default_access_token(self):
return str(uuid.uuid4())
name = fields.Char(
'Reference', default=lambda self: _('New'),
required=True)
user_id = fields.Many2one(
'res.users', string='Responsible',
index=True, required=True,
default=lambda self: self.env.user)
date_open = fields.Date(
'Confirmation date', readonly=True)
customer_id = fields.Many2one(
'plant.customer', string='Customer',
index=True, required=True)
access_token = fields.Char(
'Security Token', copy=False,
default=_get_default_access_token)
line_ids = fields.One2many(
'plant.order.line', 'order_id', string='Order Lines')
amount_total = fields.Integer(
'Amount', compute='_compute_amount_total', store=True)
company_id = fields.Many2one(
'res.company', string='Company', related='user_id.company_id',
        readonly=True, store=True)
currency_id = fields.Many2one(
'res.currency', string='Currency', related='company_id.currency_id',
readonly=True, required=True)
state = fields.Selection([
('draft', 'Draft'),
('open', 'Open'),
('done', 'Done'),
('cancel', 'Canceled')], string='State',
default='draft', index=True, required=True)
@api.depends('line_ids.price')
def _compute_amount_total(self):
for order in self:
order.amount_total = sum(order.mapped('line_ids.price'))
def _compute_portal_url(self):
for order in self:
order.portal_url = '/order/%s?access_token=%s' % (order.id, order.access_token)
def action_confirm(self):
if self.state != 'draft':
return
return self.write({
'state': 'open',
'date_open': fields.Datetime.now(),
})
def action_get_ratings(self):
action = self.env['ir.actions.act_window'].for_xml_id('rating', 'action_view_rating')
return dict(
action,
domain=[('res_id', 'in', self.ids), ('res_model', '=', 'plant.order')],
)
def action_send_rating(self):
rating_template = self.env.ref('plant_nursery.mail_template_plant_order_rating')
for order in self:
order.rating_send_request(rating_template, force_send=True)
def message_new(self, msg_dict, custom_values=None):
if custom_values is None:
custom_values = {}
# find or create customer
email_address = email_split(msg_dict.get('email_from', False))[0]
customer = self.env['plant.customer'].search([('email', 'ilike', email_address)], limit=1)
if not customer:
customer = self.env['plant.customer'].create({
'name': email_split_and_format(msg_dict.get('email_from', False))[0],
'email': email_address
})
# happy Xmas
plants = self.env['plant.plant'].search([])
plant = self.env['plant.plant'].browse([random.choice(plants.ids)])
custom_values.update({
'customer_id': customer.id,
            'line_ids': [(0, 0, {'plant_id': plant.id})],
})
return super(Order, self).message_new(msg_dict, custom_values=custom_values)
class OrderLine(models.Model):
_name = 'plant.order.line'
_description = 'Plant Order Line'
_order = 'order_id DESC'
_rec_name = 'order_id'
order_id = fields.Many2one(
'plant.order', string='Order',
index=True, ondelete='cascade', required=True)
plant_id = fields.Many2one(
'plant.plant', string='Plant',
index=True, ondelete='cascade', required=True)
price = fields.Float('Price')
@api.onchange('plant_id')
def _onchange_plant_id(self):
if self.plant_id:
self.price = self.plant_id.price
def create(self, values):
if 'price' not in values:
values['price'] = self.env['plant.plant'].browse(values['plant_id']).price
return super(OrderLine, self).create(values)
|
StarcoderdataPython
|
1704019
|
from __future__ import unicode_literals
import boto3
from moto import mock_secretsmanager
import sure # noqa
@mock_secretsmanager
def test_get_secret_value():
conn = boto3.client('secretsmanager', region_name='us-west-2')
result = conn.get_secret_value(SecretId='java-util-test-password')
assert result['SecretString'] == 'mysecretstring'
|
StarcoderdataPython
|
156533
|
<reponame>niefy/LeetCodeExam
"""
题目:1006. 笨阶乘
通常,正整数 n 的阶乘是所有小于或等于 n 的正整数的乘积。例如,factorial(10) = 10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1。
相反,我们设计了一个笨阶乘 clumsy:在整数的递减序列中,我们以一个固定顺序的操作符序列来依次替换原有的乘法操作符:乘法(*),除法(/),加法(+)和减法(-)。
例如,clumsy(10) = 10 * 9 / 8 + 7 - 6 * 5 / 4 + 3 - 2 * 1。然而,这些运算仍然使用通常的算术运算顺序:我们在任何加、减步骤之前执行所有的乘法和除法步骤,并且按从左到右处理乘法和除法步骤。
另外,我们使用的除法是地板除法(floor division),所以 10 * 9 / 8 等于 11。这保证结果是一个整数。
实现上面定义的笨函数:给定一个整数 N,它返回 N 的笨阶乘。
示例 1:
输入:4
输出:7
解释:7 = 4 * 3 / 2 + 1
示例 2:
输入:10
输出:12
解释:12 = 10 * 9 / 8 + 7 - 6 * 5 / 4 + 3 - 2 * 1
提示:
1 <= N <= 10000
-2^31 <= answer <= 2^31 - 1 (答案保证符合 32 位整数。)
@author Niefy
@date 2019-03-12
"""
import math
class Solution:
def clumsy(self, n: int) -> int:
if n==1:
return 1
elif n==2:
return 2*1
elif n==3:
return math.floor(3*2/1)
res=0
if n>3:
res=math.floor(n*(n-1)/(n-2))
n=n-3
while n>3:
res=res+n-math.floor((n-1)*(n-2)/(n-3))
n=n-4
        if n>0: # res+3-2*1, res+2-1 and res+1 all evaluate to res+1
res=res+1
return res
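# Worked trace for clumsy(10), matching the docstring example:
#     res = floor(10 * 9 / 8) = 11, n = 7
#     loop: res = 11 + 7 - floor(6 * 5 / 4) = 11, n = 3
#     after the loop n > 0, so res = 11 + 1 = 12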
# test code
t=Solution()
print(t.clumsy(1))
print(t.clumsy(2))
print(t.clumsy(3))
print(t.clumsy(4))
print(t.clumsy(5))
print(t.clumsy(10))
|
StarcoderdataPython
|
153877
|
#!/usr/bin/env python
#
# Public Domain 2014-2017 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import os
import wiredtiger, wttest, run
# test_join04.py
# Join operations
# Joins with a custom extractor, using equality joins
class test_join04(wttest.WiredTigerTestCase):
table_name1 = 'test_join04'
nentries = 100
def conn_extensions(self, extlist):
extlist.skip_if_missing = True
extlist.extension('extractors', 'csv')
# JIRA WT-2308:
# Test extractors with equality joins
def test_join_extractor(self):
self.session.create('table:join04',
'key_format=i,value_format=S,columns=(k,v)')
self.session.create('index:join04:index1',
'key_format=i,extractor=csv,' +
'app_metadata={"format" : "i", "field" : "0"}')
self.session.create('index:join04:index2',
'key_format=i,extractor=csv,' +
'app_metadata={"format" : "i", "field" : "1"}')
cursor1 = self.session.open_cursor('table:join04', None, None)
cursor1[1] = '10,21'
cursor1[2] = '10,22'
cursor1.close()
cursor1 = self.session.open_cursor('index:join04:index1', None, None)
cursor1.set_key(10)
cursor1.search()
cursor2 = self.session.open_cursor('index:join04:index2', None, None)
cursor2.set_key(22)
cursor2.search()
jcursor = self.session.open_cursor('join:table:join04', None, None)
self.session.join(jcursor, cursor1, 'compare=eq')
self.session.join(jcursor, cursor2, 'compare=eq')
found = 0
while jcursor.next() == 0:
[k] = jcursor.get_keys()
[v] = jcursor.get_values()
self.assertEqual(k, 2)
self.assertEqual(v, '10,22')
found += 1
self.assertEqual(found, 1)
jcursor.close()
cursor1.close()
cursor2.close()
# More tests using extractors with equality joins
def test_join_extractor_more(self):
self.session.create('table:join04',
'key_format=i,value_format=S,columns=(k,v)')
self.session.create('index:join04:index1',
'key_format=i,extractor=csv,' +
'app_metadata={"format" : "i", "field" : "0"}')
self.session.create('index:join04:index2',
'key_format=i,extractor=csv,' +
'app_metadata={"format" : "i", "field" : "1"}')
self.session.create('index:join04:index3',
'key_format=i,extractor=csv,' +
'app_metadata={"format" : "i", "field" : "2"}')
jcursor = self.session.open_cursor('join:table:join04', None, None)
cursor1 = self.session.open_cursor('table:join04', None, None)
k = 1
for v in ['10,21,30','10,22,30','10,23,30',
'11,21,30','11,22,30','11,23,30',
'10,21,31','10,22,31','10,23,31',
'10,21,30','11,22,31','12,23,32']:
cursor1[k] = v
k += 1
cursor1.close()
# A list of tests, one per line, each entry is:
# [[list of inputs], [list of outputs]]
tests = [
[[10,22,30], ['10,22,30']],
[[10,21,30], ['10,21,30','10,21,30']],
[[11], ['11,21,30','11,22,30','11,23,30','11,22,31']],
[[None,22], ['10,22,30','11,22,30','10,22,31','11,22,31']]]
for t in tests:
jcursor = self.session.open_cursor('join:table:join04', None, None)
ins = t[0]
outs = t[1]
cursors = []
n = 0
for k in ins:
n += 1
                if k is None: continue
uri = 'index:join04:index' + str(n)
c = self.session.open_cursor(uri, None, None)
c.set_key(k)
self.assertEqual(c.search(), 0)
cursors.append(c)
self.session.join(jcursor, c, 'compare=eq')
while jcursor.next() == 0:
[k] = jcursor.get_keys()
[v] = jcursor.get_values()
#self.tty('got=' + str(v) + ' at key=' + str(k))
self.assertTrue(v in outs)
outs.remove(v)
self.assertEqual(len(outs), 0)
jcursor.close()
for c in cursors:
c.close()
if __name__ == '__main__':
wttest.run()
|
StarcoderdataPython
|
70602
|
<gh_stars>1-10
#-----------------------------------------------------------------------------
#
# Copyright (c) 2006 by Enthought, Inc.
# All rights reserved.
#
# Author: <NAME> <<EMAIL>>
#
#-----------------------------------------------------------------------------
# Standard library imports
import logging
# Enthought library imports
import apptools.sweet_pickle as sweet_pickle
from apptools.sweet_pickle.global_registry import _clear_global_registry
from traits.api import Bool, Float, HasTraits, Int, Str
logger = logging.getLogger(__name__)
##############################################################################
# Classes to use within the tests
##############################################################################
class Foo(HasTraits):
_enthought_pickle_version = Int(1)
b1 = Bool(False)
f1 = Float(1)
i1 = Int(1)
s1 = Str('foo')
class Bar(HasTraits):
_enthought_pickle_version = Int(2)
b2 = Bool(True)
f2 = Float(2)
i2 = Int(2)
s2 = Str('bar')
class Baz(HasTraits):
_enthought_pickle_version = Int(3)
b3 = Bool(False)
f3 = Float(3)
i3 = Int(3)
s3 = Str('baz')
def __setstate__(self, state):
logger.debug('Running Baz\'s original __setstate__')
if state['_enthought_pickle_version'] < 3:
info = [('b2', 'b3'), ('f2', 'f3'), ('i2', 'i3'), ('s2', 's3')]
for old, new in info:
if old in state:
state[new] = state[old]
del state[old]
state['_enthought_pickle_version'] = 3
self.__dict__.update(state)
### EOF ######################################################################
|
StarcoderdataPython
|
3331270
|
<reponame>PandaDrunkard/proex<filename>AOJ/ALDS1_5_C.py<gh_stars>0
import math
def stdinput():
from sys import stdin
return stdin.readline().strip()
def main():
n = int(stdinput())
nodes = [[0., 0.], [100., 0.]]
for _ in range(n):
next_nodes = []
for s_node, e_node in zip(nodes[:], nodes[1:]):
next_nodes.append(s_node)
next_nodes += koch(s_node, e_node)
next_nodes.append(nodes[-1])
nodes = next_nodes
for node in nodes:
print(*map(lambda a: f'{round(a,8):.8f}', node))
def koch(start, end):
nodes = []
dx = (end[0] - start[0]) / 3
dy = (end[1] - start[1]) / 3
s = [start[0] + dx, start[1] + dy]
t = [start[0] + 2 * dx, start[1] + 2 * dy]
nodes.append(s)
nodes.append(determine_koch(s, t))
nodes.append(t)
return nodes
def determine_koch(s, t):
    # Apex of the equilateral triangle built on the segment s-t: the midpoint of
    # s-t shifted by a perpendicular vector of length |st| * sqrt(3) / 2.
dx = (t[0] - s[0]) * math.sqrt(3) / 2
dy = (t[1] - s[1]) * math.sqrt(3) / 2
u0 = [s[0] - dy, s[1] + dx]
u1 = [t[0] - dy, t[1] + dx]
return [
(u0[0] + u1[0]) / 2,
(u0[1] + u1[1]) / 2
]
if __name__ == '__main__':
main()
# import cProfile
# cProfile.run('main()')
|
StarcoderdataPython
|
1788690
|
import selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from time import sleep
with open("login.txt", "r+") as file:
content = file.readlines()
username = content[0].split("=")[1][1:-2]
password = content[1].split("=")[1][1:-2]
path = content[2].split("=")[1][1:-1] + "\chromedriver.exe"
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--disable-gpu")
chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
driver = webdriver.Chrome(path, chrome_options = chrome_options)
driver.get("https://entrar.in/login/login")
actions = ActionChains(driver)
#Login section
try:
    username_field = WebDriverWait(driver, 20).until(
        EC.presence_of_element_located((By.NAME, "username"))
    )
except:
    print("probably the website is down")
username_field.send_keys(username)
password_field = driver.find_element_by_name("password")
password_field.send_keys(password)
captcha = driver.find_element_by_class_name("label-input100")
print(captcha.text)
captcha_result = int(captcha.text[0]) + int(captcha.text[4])
captcha_field = driver.find_element_by_name("captcha")
captcha_field.send_keys(captcha_result)
captcha_field.send_keys(Keys.RETURN)
print("Signed in")
#online classroom section
driver.implicitly_wait(5)
driver.get("https://entrar.in/classroom_creation_crm_new/s_display")
print("reached")
try:
join_class = WebDriverWait(driver, 20).until(
EC.presence_of_element_located((By.XPATH, "//tbody[1]/tr[1]/td[5]/a"))
)
except:
print("Class is not yet enabled")
#join class section
driver.get(join_class.get_attribute("href"))
print("joined")
|
StarcoderdataPython
|
1612633
|
<gh_stars>100-1000
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: annotations.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import math_pb2
import dom_pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='annotations.proto',
package='userfeedback',
serialized_pb=_b('\n\x11\x61nnotations.proto\x12\x0cuserfeedback\x1a\nmath.proto\x1a\tdom.proto\"\x7f\n\nAnnotation\x12*\n\trectangle\x18\x01 \x02(\x0b\x32\x17.userfeedback.Rectangle\x12\x0f\n\x07snippet\x18\x02 \x01(\t\x12\x34\n\x14\x61nnotatedElementPath\x18\x03 \x01(\x0b\x32\x16.userfeedback.HtmlPathB\x02H\x03')
,
dependencies=[math_pb2.DESCRIPTOR,dom_pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ANNOTATION = _descriptor.Descriptor(
name='Annotation',
full_name='userfeedback.Annotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rectangle', full_name='userfeedback.Annotation.rectangle', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='snippet', full_name='userfeedback.Annotation.snippet', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='annotatedElementPath', full_name='userfeedback.Annotation.annotatedElementPath', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=58,
serialized_end=185,
)
_ANNOTATION.fields_by_name['rectangle'].message_type = math_pb2._RECTANGLE
_ANNOTATION.fields_by_name['annotatedElementPath'].message_type = dom_pb2._HTMLPATH
DESCRIPTOR.message_types_by_name['Annotation'] = _ANNOTATION
Annotation = _reflection.GeneratedProtocolMessageType('Annotation', (_message.Message,), dict(
DESCRIPTOR = _ANNOTATION,
__module__ = 'annotations_pb2'
# @@protoc_insertion_point(class_scope:userfeedback.Annotation)
))
_sym_db.RegisterMessage(Annotation)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003'))
# @@protoc_insertion_point(module_scope)
|
StarcoderdataPython
|
3247430
|
<filename>apployer/fetcher/jumpbox_utilities.py
#
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# TODO refactor the file and turn on pylint
# pylint: skip-file
import re
import json
import yaml
import logging
import tempfile
import subprocess
import shutil
import urlparse
import xml.etree.ElementTree as ET
import ConfigParser
from .expressions import ExpressionsEngine, FsKeyValueStore, return_fixed_output
GENERATE_KEYTAB_SCRIPT = """#!/bin/sh
function tmp_file () {
TFILE="/tmp/$(basename $0).$$.keytab"
echo $TFILE
}
TMP=$(tmp_file)
CMD=$(
{
PRINC=$@
echo "xst -norandkey -k $TMP $PRINC"
})
sudo kadmin.local -q "$CMD" 2&> /dev/null
sudo base64 $TMP
sudo rm $TMP
"""
PORT_CHECKER_SCRIPT = """
import socket;
import sys;
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(("{hostname}", {port}))
print(result);
"""
hgm_service_name = 'HADOOPGROUPSMAPPING'
hgm_role_name = 'HADOOPGROUPSMAPPING-HADOOPGROUPSMAPPING_RESTSERVER'
DEFAULT_SENTRY_PORT = 8038
DEFAULT_ARCADIA_PORT = 80
DEFAULT_HUE_PORT = 8888
DEFAULT_H2O_PROVISIONER_PORT = '9876'
DEFAULT_OOZIE_PORT = '11000'
DEFAULT_YARN_PORT = '8032'
class ConfigurationExtractor(object):
def __init__(self, config):
self._logger = logging.getLogger(__name__)
self._hostname = config['jumpbox']['hostname']
self._ssh_required = False if self._hostname == 'localhost' else True
self._hostport = config['jumpbox']['hostport']
self._username = config['jumpbox']['username']
self._ssh_key_filename = config['jumpbox']['key_filename']
self._ssh_key_password = config['jumpbox']['key_password']
self._kerberos_used = config['kerberos_used']
self._kubernetes_used = config['kubernetes_used']
self._paths = config['paths']
self._cdh_manager_user = config['cdh-manager']['user']
        self._cdh_manager_password = config['cdh-manager']['password']
self._cdh_manager_port = config['cdh-manager']['cm_port']
self._cdh_manager_ssh_user = config['cdh-manager']['ssh_user']
self._inventory = self._generate_inventory(config['workers_count'], config['masters_count'], config['envname'])
self._envname = config['envname']
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
@property
def paths(self):
return self._paths
@property
def ssh_required(self):
return self._ssh_required
def evaluate_expressions(self, deployment_variables):
self._logger.info('Evaluating expressions from deployment variables')
passwords_store = FsKeyValueStore(self)
        self._expressions_engine = ExpressionsEngine(passwords_store)
        for key, value in deployment_variables.iteritems():
            deployment_variables[key] = self._expressions_engine.parse_and_apply_expression(key, value)
self._logger.info('Expressions evaluated')
return deployment_variables
def get_deployment_configuration(self):
self._logger.info('Getting deployment configuration')
self._jumpboxes_vars = self._get_ansible_hosts()
cf_tiny_yml_data = self._get_data_from_cf_tiny_yaml()
docker_broker_yml = self._get_data_from_docker_broker_yaml()
defaults_cdh_yml = self._get_data_from_defaults_cdh_yaml()
cdh_manager_data = self._get_data_from_cdh_manager()
self._logger.info('Deployment configuration downloaded')
return dict(cf_tiny_yml_data.items() + cdh_manager_data.items() + docker_broker_yml.items() + defaults_cdh_yml.items())
def _get_ansible_hosts(self):
inventory_file_content = return_fixed_output(
self.execute_command('sudo -i cat ' + self._paths['ansible_hosts']), rstrip=False)
with tempfile.NamedTemporaryFile('w') as f:
f.file.write(inventory_file_content)
f.file.close()
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.readfp(open(f.name))
return config
def _get_ansible_var(self, option, section='jump-boxes:vars', default_value=''):
if self._jumpboxes_vars.has_option(section, option):
return self._jumpboxes_vars.get(section, option)
else:
return default_value
def _get_data_from_cf_tiny_yaml(self):
cf_tiny_yaml_file_content = self.execute_command('sudo -i cat ' + self._paths['cf_tiny_yml'])
cf_tiny_yaml_file_content = return_fixed_output(cf_tiny_yaml_file_content, rstrip=False)
cf_tiny_yaml = yaml.load(cf_tiny_yaml_file_content)
result = {
"nats_ip": cf_tiny_yaml['properties']['nats']['machines'][0],
"h2o_provisioner_port": DEFAULT_H2O_PROVISIONER_PORT,
"cf_admin_password": cf_tiny_yaml['properties']['loggregator_endpoint']['shared_secret'],
"cf_admin_client_password": cf_tiny_yaml['properties']['loggregator_endpoint']['shared_secret'],
"apps_domain": cf_tiny_yaml['properties']['domain'],
"tap_console_password": cf_tiny_yaml['properties']['loggregator_endpoint']['shared_secret'],
"atk_client_pass": cf_tiny_yaml['properties']['loggregator_endpoint']['shared_secret'],
"email_address": cf_tiny_yaml['properties']['login']['smtp']['senderEmail'],
"run_domain": cf_tiny_yaml['properties']['domain'],
"smtp_pass": '"{}"'.format(cf_tiny_yaml['properties']['login']['smtp']['password']),
"smtp_user": '"{}"'.format(cf_tiny_yaml['properties']['login']['smtp']['user']),
"smtp_port": cf_tiny_yaml['properties']['login']['smtp']['port'],
"smtp_host": cf_tiny_yaml['properties']['login']['smtp']['host'],
"smtp_protocol": self._determine_smtp_protocol(cf_tiny_yaml['properties']['login']['smtp']['port']),
"cloudera_manager_internal_host": self._inventory['cdh-manager'][0]
}
for i, node in enumerate(self._inventory['cdh-master']):
result['master_node_host_{}'.format(i + 1)] = node
return result
def _get_data_from_docker_broker_yaml(self):
docker_broker_yaml_file_content = self.execute_command('sudo -i cat ' + self._paths['docker_broker_yml'])
docker_broker_yaml_file_content = return_fixed_output(docker_broker_yaml_file_content, rstrip=False)
docker_broker_yaml = yaml.load(docker_broker_yaml_file_content)
return {
"h2o_provisioner_host": docker_broker_yaml['jobs'][0]['networks'][0]['static_ips'][0]
}
def _get_data_from_defaults_cdh_yaml(self):
defaults_cdh_yaml_file_content = self.execute_command('sudo -i cat ' + self._paths['defaults_cdh_yml'])
defaults_cdh_yaml_file_content = return_fixed_output(defaults_cdh_yaml_file_content, rstrip=False)
defaults_cdh_yaml = yaml.load(defaults_cdh_yaml_file_content)
return {
"kerberos_password": defaults_cdh_yaml['cf_kerberos_password']
}
def _get_data_from_cdh_manager(self):
self._cdh_manager_hostname = self._inventory['cdh-manager'][0]
deployments_settings_endpoint = 'http://{}:{}/api/v10/cm/deployment'.format(self._cdh_manager_hostname,
self._cdh_manager_port)
self._logger.info('Send request to %s', deployments_settings_endpoint)
response = self.execute_command('curl -X GET {} -u {}:{}'
.format(deployments_settings_endpoint, self._cdh_manager_user,
self._cdh_manager_password))
deployment_settings = json.loads(response)
result = dict()
result['sentry_port'] = ''
result['sentry_address'] = ''
result['sentry_keytab_value'] = ''
result['hdfs_keytab_value'] = ''
result['auth_gateway_keytab_value'] = ''
result['vcap_keytab_value'] = ''
result['hgm_keytab_value'] = ''
result['krb5_base64'] = ''
result['kerberos_cacert'] = ''
result['auth_gateway_profile'] = 'cloud,warehouse-auth-gateway,zookeeper-auth-gateway,hdfs-auth-gateway,' \
'https-hgm-auth-gateway,yarn-auth-gateway,hbase-auth-gateway'
if self._kerberos_used:
result['kerberos_host'] = self._cdh_manager_hostname
result['hdfs_keytab_value'] = self._generate_keytab('hdfs')
result['auth_gateway_keytab_value'] = self._generate_keytab('authgateway/sys')
result['hgm_keytab_value'] = self._generate_keytab('hgm/sys')
result['vcap_keytab_value'] = self._generate_keytab('vcap')
result['krb5_base64'] = self._generate_base64_for_file('/etc/krb5.conf')
result['kerberos_cacert'] = self._generate_base64_for_file('/var/krb5kdc/cacert.pem')
sentry_service = self._find_item_by_attr_value('SENTRY', 'name',
deployment_settings['clusters'][0]['services'])
result['sentry_port'] = self._find_item_by_attr_value('sentry_service_server_rpc_port', 'name',
sentry_service['config']['items']).get('value') \
or DEFAULT_SENTRY_PORT
result['sentry_address'] = self._get_host('SENTRY', 'SENTRY-SENTRY_SERVER', deployment_settings).get(
'hostname')
result['sentry_keytab_value'] = self._generate_keytab('hive/sys')
result[
'auth_gateway_profile'] = 'cloud,kerberos-warehouse-auth-gateway,zookeeper-auth-gateway,hdfs-auth-gateway,' \
'kerberos-hgm-auth-gateway,yarn-auth-gateway,hbase-auth-gateway'
result['vpc'] = ''
result['region'] = ''
result['kubernetes_aws_access_key_id'] = ''
result['kubernetes_aws_secret_access_key'] = ''
result['key_name'] = ''
result['consul_dc'] = ''
result['consul_join'] = ''
result['kubernetes_subnet_cidr'] = ''
result['kubernetes_subnet'] = ''
result['quay_io_username'] = ''
result['quay_io_password'] = ''
if self._get_ansible_var('provider') == 'aws' and self._kubernetes_used:
result['vpc'] = self._get_vpc_id()
result['region'] = self._get_ansible_var('region')
result['kubernetes_aws_access_key_id'] = self._get_ansible_var('kubernetes_aws_access_key_id')
result['kubernetes_aws_secret_access_key'] = self._get_ansible_var('kubernetes_aws_secret_access_key')
result['key_name'] = self._get_ansible_var('key_name')
result['consul_dc'] = self._envname
result['consul_join'] = return_fixed_output(self.execute_command('host cdh-master-0')).split()[3]
result['kubernetes_subnet'] = self._get_ansible_var('kubernetes_subnet_id')
command_output = self.execute_command(
'aws --region {} ec2 describe-subnets --filters Name=subnet-id,Values={}'
.format(result['region'], result['kubernetes_subnet']))
subnet_json = json.loads(return_fixed_output(command_output, rstrip=False))
result['kubernetes_subnet_cidr'] = subnet_json['Subnets'][0]['CidrBlock']
result['quay_io_username'] = self._get_ansible_var('quay_io_username')
result['quay_io_password'] = self._get_ansible_var('quay_io_password')
result['java_http_proxy'] = ''
if self._get_ansible_var('provider') == 'openstack':
result['java_http_proxy'] = self._get_java_http_proxy()
result['kubernetes_used'] = self._kubernetes_used
hgm_service = self._find_item_by_attr_value(hgm_service_name, 'name',
deployment_settings['clusters'][0]['services'])
hgm_protocol = 'http://' if self._kerberos_used else 'https://'
result['hgm_adress'] = hgm_protocol + self._get_host(hgm_service_name, hgm_role_name, deployment_settings)[
'hostname'] + ':' + self._find_item_by_attr_value('rest_port', 'name',
self._find_item_by_attr_value(hgm_role_name + '-BASE',
'name', hgm_service[
'roleConfigGroups'])[
'config']['items'])['value']
result['hgm_password'] = self._find_item_by_attr_value('basic_auth_pass', 'name',
self._find_item_by_attr_value(hgm_role_name + '-BASE',
'name', hgm_service[
'roleConfigGroups'])[
'config']['items'])['value']
result['hgm_username'] = self._find_item_by_attr_value('basic_auth_user', 'name',
self._find_item_by_attr_value(hgm_role_name + '-BASE',
'name', hgm_service[
'roleConfigGroups'])[
'config']['items'])['value']
result['oozie_server'] = 'http://' + self._get_host('OOZIE', 'OOZIE-OOZIE_SERVER', deployment_settings)[
'hostname'] + ':' + DEFAULT_OOZIE_PORT
result['job_tracker'] = self._get_host('YARN', 'YARN-GATEWAY', deployment_settings)[
'hostname'] + ':' + DEFAULT_YARN_PORT
sqoop_client = self._find_item_by_attr_value('SQOOP_CLIENT', 'name',
deployment_settings['clusters'][0]['services'])
sqoop_entry = self._find_item_by_attr_value('sqoop-conf/sqoop-site.xml_client_config_safety_valve', 'name',
self._find_item_by_attr_value('SQOOP_CLIENT-GATEWAY-BASE', 'name',
sqoop_client['roleConfigGroups'])[
'config']['items'])['value']
result['metastore'] = self._get_property_value(sqoop_entry, 'sqoop.metastore.client.autoconnect.url')
result['cloudera_address'] = self._inventory['cdh-manager'][0]
result['cloudera_port'] = self._cdh_manager_port
result['cloudera_user'] = self._cdh_manager_user
result['cloudera_password'] = self._cdh_manager_password
result['namenode_internal_host'] = self._get_host('HDFS', 'HDFS-NAMENODE', deployment_settings)['hostname']
result['hue_node'] = self._get_host('HUE', 'HUE-HUE_SERVER', deployment_settings)['hostname']
result['hue_port'] = DEFAULT_HUE_PORT
result['external_tool_hue'] = self._check_port(result['hue_node'], result['hue_port'])
result['h2o_node'] = self._inventory['cdh-worker'][0]
result['arcadia_node'] = self._inventory['cdh-worker'][0]
result['arcadia_port'] = DEFAULT_ARCADIA_PORT
result['external_tool_arcadia'] = self._check_port(result['arcadia_node'], result['arcadia_port'])
cluster_name = deployment_settings['clusters'][0]['name']
result['import_hadoop_conf_hdfs'] = self._get_client_config_for_service('HDFS', cluster_name)
result['import_hadoop_conf_hbase'] = self._get_client_config_for_service('HBASE', cluster_name)
result['import_hadoop_conf_yarn'] = self._get_client_config_for_service('YARN', cluster_name)
result['import_hadoop_conf_hive'] = self._get_client_config_for_service('HIVE', cluster_name)
return result
def _get_java_http_proxy(self):
http_proxy = self._get_ansible_var('http_proxy')
https_proxy = self._get_ansible_var('https_proxy')
no_proxy = self._get_ansible_var('no_proxy')
http_proxy_host, http_proxy_port = self._parse_url_if_not_empty(http_proxy)
https_proxy_host, https_proxy_port = self._parse_url_if_not_empty(https_proxy)
non_proxy_hosts = self._convert_no_proxy_to_java_style(no_proxy)
return (self._fill_if_var_not_empty('-Dhttp.proxyHost={} ', http_proxy_host) + \
self._fill_if_var_not_empty('-Dhttp.proxyPort={} ', http_proxy_port) + \
self._fill_if_var_not_empty('-Dhttps.proxyHost={} ', https_proxy_host) + \
self._fill_if_var_not_empty('-Dhttps.proxyPort={} ', https_proxy_port) + \
self._fill_if_var_not_empty('-Dhttp.nonProxyHosts={} ', non_proxy_hosts)).strip()
def _parse_url_if_not_empty(self, url):
if url:
splitted = urlparse.urlsplit(url)
return splitted.hostname, splitted.port
else:
return '', ''
def _convert_no_proxy_to_java_style(self, no_proxy):
if not no_proxy:
return ''
no_proxy = re.sub(r'^\.', '*.', no_proxy)
no_proxy = no_proxy.replace(',.', '|*.')
no_proxy = no_proxy.replace(',', '|')
no_proxy += '|localhost|127.*|[::1]' # these entries don't reside in /etc/ansible/hosts
return no_proxy
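    # Worked example (hypothetical value):
    #     '.example.com,host1,.corp.local'
    # becomes
    #     '*.example.com|host1|*.corp.local|localhost|127.*|[::1]'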
def _fill_if_var_not_empty(self, template, value):
return template.format(value) if value else ''
def _get_mac_address(self):
output = self.execute_command('curl http://169.254.169.254/latest/meta-data/network/interfaces/macs/')
return return_fixed_output(output)
def _get_vpc_id(self):
mac_address = self._get_mac_address()
output = self.execute_command('curl http://169.254.169.254/latest/meta-data/network/interfaces/macs/{}/vpc-id'
.format(mac_address))
return return_fixed_output(output)
def _generate_inventory(self, workers_count, masters_count, envname):
hosts = {
'cdh-master': [],
'cdh-worker': [],
'cdh-manager': []
}
for i in range(workers_count):
hosts['cdh-worker'].append('cdh-worker-{}.node.{}.consul'.format(i, envname))
for i in range(masters_count):
hosts['cdh-master'].append('cdh-master-{}.node.{}.consul'.format(i, envname))
hosts['cdh-manager'].append(hosts['cdh-master'][2])
return hosts
def execute_command(self, command):
if self._ssh_required:
self._logger.info('Execute remote command {} on {} machine.'.format(command, self._hostname))
command_template = 'ssh -i {keyname} -tt {username}@{hostname} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {command}'
command_to_execute = command_template.format(keyname=self._ssh_key_filename, username=self._username,
hostname=self._hostname, command=command)
output = subprocess.check_output(command_to_execute.split())
return return_fixed_output(output, rstrip=False)
else:
self._logger.info('Calling local command: %s', command)
return subprocess.check_output(command, shell=True)
def _generate_script(self, script, target):
with tempfile.NamedTemporaryFile('w') as f:
f.file.write(script)
f.file.close()
if self._ssh_required:
command = 'scp -i {keyname} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {script_name} {username}@{hostname}:{target}'.format(
keyname=self._ssh_key_filename, script_name=f.name, username=self._username,
hostname=self._hostname, target=target)
self._logger.info('Execute command: {}'.format(command))
subprocess.check_call(command.split())
else:
shutil.copyfile(f.name, target)
def _generate_keytab(self, principal_name):
self._logger.info('Generating keytab for {} principal.'.format(principal_name))
self._generate_script(GENERATE_KEYTAB_SCRIPT, '/tmp/generate_keytab_script.sh')
COPY_KEYTAB_SCRIPT = 'sudo -i scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ' \
'/tmp/generate_keytab_script.sh {}@{}:/tmp/'.format(self._cdh_manager_ssh_user,
self._cdh_manager_hostname)
if self._ssh_required:
CHMOD_KEYTAB_SCRIPT = 'sudo -i ssh -tt {}@{} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ' \
'"chmod 700 /tmp/generate_keytab_script.sh"'.format(self._cdh_manager_ssh_user,
self._cdh_manager_hostname)
EXECUTE_KEYTAB_SCRIPT = 'sudo -i ssh -tt {}@{} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ' \
'"/tmp/generate_keytab_script.sh {}"'.format(self._cdh_manager_ssh_user,
self._cdh_manager_hostname,
principal_name)
else:
CHMOD_KEYTAB_SCRIPT = 'sudo -i ssh -tt {}@{} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ' \
'chmod 700 /tmp/generate_keytab_script.sh'.format(self._cdh_manager_ssh_user,
self._cdh_manager_hostname)
EXECUTE_KEYTAB_SCRIPT = 'sudo -i ssh -tt {}@{} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ' \
'/tmp/generate_keytab_script.sh {}'.format(self._cdh_manager_ssh_user,
self._cdh_manager_hostname,
principal_name)
try:
self.execute_command(COPY_KEYTAB_SCRIPT)
self.execute_command(CHMOD_KEYTAB_SCRIPT)
keytab_hash = self.execute_command(EXECUTE_KEYTAB_SCRIPT)
except subprocess.CalledProcessError as e:
self._logger.error('Process failed with exit code %s and output %s', e.returncode, e.output)
raise e
keytab_hash = return_fixed_output(keytab_hash)
self._logger.info('Keytab for %s principal has been generated.', principal_name)
return keytab_hash
def _check_port(self, hostname, port):
        self._logger.info('Checking whether port %d is open on %s machine.', port, hostname)
port_checker_script = PORT_CHECKER_SCRIPT.format(hostname=hostname, port=port)
self._generate_script(port_checker_script, '/tmp/check_port.py')
status = int(return_fixed_output(self.execute_command('sudo -i python /tmp/check_port.py')))
return False if status else True
def _generate_base64_for_file(self, file_path):
self._logger.info('Generating base64 for %s file.', file_path)
if self._ssh_required:
GENERATE_BASE_64 = 'sudo -i ssh -tt {}@{} -o UserKnownHostsFile=/dev/null ' \
'-o StrictHostKeyChecking=no "base64 {}"' \
.format(self._cdh_manager_ssh_user, self._cdh_manager_hostname, file_path)
else:
GENERATE_BASE_64 = 'sudo -i ssh -tt {}@{} -o UserKnownHostsFile=/dev/null ' \
'-o StrictHostKeyChecking=no base64 {}' \
.format(self._cdh_manager_ssh_user, self._cdh_manager_hostname, file_path)
base64_file_hash = self.execute_command(GENERATE_BASE_64)
base64_file_hash = return_fixed_output(base64_file_hash)
self._logger.info('Base64 hash for %s file on %s machine has been generated.', file_path,
self._cdh_manager_hostname)
return base64_file_hash
def _get_client_config_for_service(self, service_name, cluster_name):
self.execute_command('wget http://{}:{}/api/v10/clusters/{}/services/{}/clientConfig '
'--password {} --user {} -P {}'
.format(self._cdh_manager_hostname, self._cdh_manager_port, cluster_name, service_name,
self._cdh_manager_password, self._cdh_manager_user, service_name))
base64_file_hash = self.execute_command('base64 {}/clientConfig'.format(service_name))
self.execute_command('rm -r {}'.format(service_name))
result = base64_file_hash.splitlines()
return ''.join(result)
def _determine_smtp_protocol(self, port):
self._logger.info('Determining mail protocol')
if port in (465,):
return 'smtps'
elif port in (25, 587, 2525):
return 'smtp'
else:
self._logger.info('Custom mail port is set, '
'set your mail protocol manually in template_variables.yml file and run script once again!')
return None
def _find_item_by_attr_value(self, attr_value, attr_name, array_with_dicts):
try:
return next(item for item in array_with_dicts if item[attr_name] == attr_value)
except StopIteration:
return dict()
def _get_host(self, service_name, role_name, settings):
hdfs_service = self._find_item_by_attr_value(service_name, 'name', settings['clusters'][0]['services'])
hdfs_namenode = self._find_item_by_attr_value(role_name, 'name', hdfs_service['roles'])
host_id = hdfs_namenode['hostRef']['hostId']
return self._find_item_by_attr_value(host_id, 'hostId', settings['hosts'])
def _get_property_value(self, config, key):
properties = ET.fromstring('<properties>' + config + '</properties>')
for property in properties:
if property.find('name').text == key:
return property.find('value').text
|
StarcoderdataPython
|
1740490
|
#%%
# <NAME> 12-Apr-2019
# Iris Data Set Project.
# Scatterplots of Iris Dataset
# import pandas, seaborn and matplotlib libraries
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# import iris dataset with Pandas
f = pd.read_csv("iris.csv")
# set the key parameters for the 'Sepal Length' scatter plot
ax = sns.scatterplot(x="sepal_length", y="sepal_width", hue="species", data=f)
# set the title of the scatter plot
plt.title("Scatter Plot - 'Sepal Length and Sepal Width' of all 3 species")
plt.show()
# set the key parameters for the 'Petal Length and Petal Width' scatter plot
ax = sns.scatterplot(x="petal_length", y="petal_width", hue="species", data=f)
# set the title of the scatter plot
plt.title("Scatter Plot - 'Petal Length and Petal Width' of all 3 species")
plt.show()
|
StarcoderdataPython
|
1736451
|
<filename>kedua.py<gh_stars>0
import shapefile
class Kedua:
def __init__(self):
self.kedua = shapefile.Writer('kedua', shapeType = shapefile.POLYGON)
self.kedua.shapeType
self.kedua.field('nama_ruangan', 'C')
#<NAME> - 1174035
def tanggaBawahKiri(self, label):
self.kedua.record(label)
self.kedua.poly([[[-3, -2], [4, -2], [4, 1], [-3, 1], [-3, -2]]])
#<NAME> - 1174035
def tanggaBawahKanan(self, label):
self.kedua.record(label)
self.kedua.poly([[[22, -2], [29, -2], [29, 1], [22, 1], [22, -2]]])
#<NAME> - 1174035
def tanggaAtasKiri(self, label):
self.kedua.record(label)
self.kedua.poly([[[-3, 40], [4, 40], [4, 43], [-3, 43], [-3, 40]]])
#<NAME> - 1174035
def tanggaAtasKanan(self, label):
self.kedua.record(label)
self.kedua.poly([[[22, 40], [29, 40], [29, 43], [22, 43], [22, 40]]])
#<NAME> - 1174035
def tamanKosongTengah(self, label):
self.kedua.record(label)
self.kedua.poly([[[7, 11], [19, 11], [19, 30], [7, 30], [7, 11]]])
#<NAME> - 1174040
def R213(self, label):
self.kedua.record(label)
self.kedua.poly([[[7, 40], [10, 40], [10, 33], [7, 33], [7, 40]]])
#<NAME> - 1174040
def IRC(self, label):
self.kedua.record(label)
self.kedua.poly([[[10, 40], [13, 40], [13, 33], [10, 33], [10, 40]]])
#<NAME> - 1174040
def RLabBisnis(self, label):
self.kedua.record(label)
self.kedua.poly([[[13, 40], [16, 40], [16, 33], [13, 33], [13, 40]]])
#<NAME> - 1174040
def RLabComprehensive(self, label):
self.kedua.record(label)
self.kedua.poly([[[16, 40], [19, 40], [19, 33], [16, 33], [16, 40]]])
#<NAME>-1174059
def ruang208(self, label):
self.kedua.record(label)
self.kedua.poly([[[-3, 22], [4, 22], [4, 25], [-3, 25], [-3, 22]]])
#<NAME>an-1174059
def ruang209(self, label):
self.kedua.record(label)
self.kedua.poly([[[-3, 25], [4, 25], [4, 28], [-3, 28], [-3, 25]]])
#<NAME> Nainggolan-1174059
def ruang210(self, label):
self.kedua.record(label)
self.kedua.poly([[[-3, 28], [4, 28], [4, 31], [-3, 31], [-3, 28]]])
#<NAME> - 1174043
def ruangan205(self, label):
self.kedua.record(label)
self.kedua.poly([[[-3, 13], [4, 13], [4, 16], [-3, 16], [-3, 13]]])
#<NAME> - 1174043
def ruangan206(self, label):
self.kedua.record(label)
self.kedua.poly([[[-3, 16], [4, 16], [4, 19], [-3, 19], [-3, 16]]])
#<NAME> - 1174043
def ruangan207(self, label):
self.kedua.record(label)
self.kedua.poly([[[-3, 19], [4, 19], [4, 22], [-3, 22], [-3, 19]]])
#<NAME> - 1174039
def ruangan201(self, label):
self.kedua.record(label)
self.kedua.poly([[[-3, 1], [4, 1], [4, 4], [-3, 4], [-3, 1]]])
#<NAME> - 1174039
def ruangan202(self, label):
self.kedua.record(label)
self.kedua.poly([[[-3, 4], [4, 4], [4, 7], [-3, 7], [-3, 4]]])
#<NAME> - 1174057
def RServer(self, label):
self.kedua.record(label)
self.kedua.poly([[[22, 34], [29, 34], [29, 37], [22, 37], [22, 34]]])
#<NAME> - 1174057
def LabLogistik(self, label):
self.kedua.record(label)
self.kedua.poly([[[22, 31], [29, 31], [29, 34], [22, 34], [22, 31]]])
#<NAME> - 1174050
def ruangan219(self, label):
self.kedua.record(label)
self.kedua.poly([[[22, 25], [29, 25], [29, 31], [22, 31], [22, 25]]])
#<NAME> - 1174050
def ruangan220(self, label):
self.kedua.record(label)
self.kedua.poly([[[22, 22], [29, 22], [29, 25], [22, 25], [22, 22]]])
#<NAME> - 1174042
def toiletdosen(self, label):
self.kedua.record(label)
self.kedua.poly([[[22, 38.5], [29, 38.5], [29, 40], [22, 40], [22, 38.5]]])
#<NAME> - 1174042
def toiletcowo(self, label):
self.kedua.record(label)
self.kedua.poly([[[22, 37], [29, 37], [29, 38.5], [22, 38.5], [22, 37]]])
#<NAME> - 1174042
def prodiak(self, label):
self.kedua.record(label)
self.kedua.poly([[[7, 1], [19, 1], [19, 8], [7, 8], [7, 1]]])
#<NAME> - 1174056
def ruangan203(self, label):
self.kedua.record(label)
self.kedua.poly([[[-3, 7], [4, 7], [4, 10], [-3, 10], [-3, 7]]])
#<NAME> - 1174056
def ruangan204(self, label):
self.kedua.record(label)
self.kedua.poly([[[-3, 10], [4, 10], [4, 13], [-3, 13], [-3, 10]]])
#<NAME> - 1174038
def prodiD3MB(self, label):
self.kedua.record(label)
self.kedua.poly([[[22, 7], [29, 7], [29, 10], [22, 10], [22, 7]]])
#<NAME> - 1174038
def prodiD3LB(self, label):
self.kedua.record(label)
self.kedua.poly([[[22, 1], [29, 1], [29, 7], [22, 7], [22, 1]]])
#<NAME>-1174041
def ruangan211(self, label):
self.kedua.record(label)
self.kedua.poly([[[-3, 31], [4, 31], [4, 34], [-3, 34], [-3, 31]]])
#<NAME>-1174041
def ruangan212(self, label):
self.kedua.record(label)
self.kedua.poly([[[-3, 34], [4, 34], [4, 37], [-3, 37], [-3, 34]]])
#<NAME> - 1174034
def ruangan221(self, label):
self.kedua.record(label)
self.kedua.poly([[[22, 19], [29, 19], [29, 22], [22, 22], [22, 19]]])
#<NAME> - 1174034
def ruangan222(self, label):
self.kedua.record(label)
self.kedua.poly([[[22, 16], [29, 16], [29, 19], [22, 19], [22, 16]]])
    # Add new functions above close()
def close(self):
self.kedua.close()
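# Minimal usage sketch (assumes the pyshp 'shapefile' package is installed and
# the working directory is writable):
#     k = Kedua()
#     k.R213('R213')
#     k.close()  # writes kedua.shp / kedua.shx / kedua.dbf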
|
StarcoderdataPython
|
3381828
|
<reponame>divine-dragonflies/codejam-summer-2021<filename>src/__main__.py<gh_stars>0
"""The entry point to this game."""
from time import sleep
from typing import List
from blessed import Terminal
board = [
[
["w", "w", "w", "w", "w", "w", "w", "w", "w"],
["w", ".", ".", ".", ".", ".", ".", ".", "w"],
["w", ".", ".", ".", ".", ".", ".", "d", "w"],
["w", ".", ".", ".", ".", ".", ".", ".", "w"],
["w", "w", "w", "w", ".", "w", "w", "w", "w"],
["w", ".", ".", ".", ".", ".", ".", ".", "w"],
["w", ".", ".", ".", ".", ".", ".", ".", "w"],
["w", "p", ".", ".", ".", ".", ".", ".", "w"],
["w", "w", "w", "w", "w", "w", "w", "w", "w"],
],
[
["w", "w", "w", "w", "w", "w", "w", "w", "w"],
["w", ".", ".", ".", ".", "w", ".", ".", "w"],
["w", ".", "w", "w", ".", "w", ".", "d", "w"],
["w", ".", ".", ".", ".", ".", ".", ".", "w"],
["w", "w", "w", "w", ".", "w", "w", "w", "w"],
["w", ".", ".", ".", ".", ".", ".", ".", "w"],
["w", ".", ".", ".", ".", ".", ".", ".", "w"],
["w", "p", ".", ".", ".", ".", ".", ".", "w"],
["w", "w", "w", "w", "w", "w", "w", "w", "w"],
],
]
term = Terminal()
COLORPLAYER = term.blue_on_blue
COLORWALL = term.chartreuse4_on_chartreuse4
COLOREND = term.yellow_on_yellow
COLORAIR = term.white_on_white
COLORTERMINAL = term.white_on_white
class WinRound(BaseException):
pass
def draw_board(_board: List[List[str]]) -> None:
"""
Draws the game board.
:param _board: 2D list of strings that represent the game state.
"""
print(term.home + COLORTERMINAL + term.clear)
for line in _board:
currentcolor = None
accum = ""
for char in line:
if char == "." and currentcolor is not COLORAIR:
currentcolor = COLORAIR
accum += COLORAIR
elif char == "p" and currentcolor is not COLORPLAYER:
currentcolor = COLORPLAYER
accum += COLORPLAYER
elif char == "w" and currentcolor is not COLORWALL:
currentcolor = COLORWALL
accum += COLORWALL
elif char == "d" and currentcolor is not COLOREND:
currentcolor = COLOREND
accum += COLOREND
accum += char
print(accum)
def find_symbol(_board: List, char):
row_number = 0
for row in _board:
if char in row:
player_index_x = row_number
player_index_y = row.index(char)
row_number += 1
return (player_index_x, player_index_y)
def collision(_board, x=0, y=0) -> bool:
rows = len(_board) - 1
columns = len(_board[0]) - 1
if x == 0 or y == 0:
return False
elif x == rows or y == columns:
return False
elif _board[x][y] == "w":
return False
else:
return True
def check_win(_board, x, y):
win_x, win_y = find_symbol(_board, "d")
if win_x == x and win_y == y:
return True
else:
return False
def move(_board: List, direction: str) -> List:
player_x, player_y = find_symbol(_board, "p")
if direction in ("btn_left", "btn_right"):
action = {
"btn_left": (player_x, player_y - 1),
"btn_right": (player_x, player_y + 1),
}
player_new_x, player_new_y = action[direction]
if collision(_board, player_new_x, player_new_y):
_board[player_x][player_y] = "."
_board[player_new_x][player_new_y] = "p"
else:
rotate_action = {
"btn_up": rotate_board(_board, 1),
"btn_down": rotate_board(_board, 2),
}
_board = rotate_action[direction]
player_x, player_y = find_symbol(_board, "p")
if check_win(_board, player_x, player_y):
raise WinRound
return _board
def gravity(_board: List):
player_x, player_y = find_symbol(_board, "p")
while collision(_board, player_x + 1, player_y):
if check_win(_board, player_x + 1, player_y):
raise WinRound
_board[player_x][player_y] = "."
player_x += 1
_board[player_x][player_y] = "p"
return _board
def key_mapping(key: str, _board: List):
action = {
"a": "btn_left",
"d": "btn_right",
"w": "btn_up",
"s": "btn_down",
}
_board = move(_board, action[key])
return gravity(_board)
def rotate_board(_board, rotate_direction):
n = len(_board)
m = len(_board[1])
new_board = []
if rotate_direction == 0:
return _board
if rotate_direction == 1:
for i in range(m):
new_row = []
for j in range(n - 1, -1, -1):
new_row.append(_board[j][i])
new_board.append(new_row)
if rotate_direction == 2:
for i in range(n - 1, -1, -1):
new_row = []
for j in range(m):
new_row.append(_board[j][i])
new_board.append(new_row)
return new_board
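# Worked example for rotate_board (hypothetical 2x3 grid):
#     rotate_board([['a', 'b', 'c'],
#                   ['d', 'e', 'f']], 1)
#     -> [['d', 'a'], ['e', 'b'], ['f', 'c']]   (a clockwise rotation)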
if __name__ == "__main__":
with term.fullscreen(), term.cbreak():
boardcount = 0
draw_board(board[boardcount])
val = term.inkey()
        # Keep reading keys until ESCAPE is pressed
while val.code != term.KEY_ESCAPE:
val = term.inkey()
if val and str(val) in "wasd":
try:
board[boardcount] = key_mapping(str(val), board[boardcount])
draw_board(board[boardcount])
except WinRound:
boardcount += 1
if boardcount > len(board) - 1:
print(term.green_on_black + "You Win")
sleep(5)
break
else:
draw_board(board[boardcount])
else:
pass
|
StarcoderdataPython
|
3399928
|
<reponame>svenfritsch/django-elo-rating
import math
from django.db import models
from django.conf import settings
class EloRated(models.Model):
elo_rating = models.FloatField(default=settings.ELO_START_VALUE)
class Meta:
abstract = True
def probability(self, opponent):
return 1 / (1 + math.pow(10, (opponent.elo_rating - self.elo_rating) / 400))
def updated_elo(self, opponent, result):
return self.elo_rating + settings.ELO_FACTOR_K * (result - self.probability(opponent))
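    # Worked example (hypothetical ratings, with settings.ELO_FACTOR_K = 32):
    #     a.elo_rating = 1200, b.elo_rating = 1000
    #     a.probability(b) = 1 / (1 + 10 ** ((1000 - 1200) / 400)) ~= 0.76
    #     a.updated_elo(b, result=1) ~= 1200 + 32 * (1 - 0.76) ~= 1207.7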
|
StarcoderdataPython
|
1637662
|
"""
Collection of basic Python functions for a meteorological library.
"""
__author__ = "<NAME>"
__version__ = '0.1.0'
|
StarcoderdataPython
|
1702993
|
import ntpath
import os
import argparse
from datetime import datetime
import logging
from pypgrest import Postgrest
import pandas as pd
import boto3
from dotenv import load_dotenv
import utils
# Environment variables (loaded from a local .env file, if present)
load_dotenv()
AWS_ACCESS_ID = os.getenv("AWS_ACCESS_ID")
AWS_PASS = os.getenv("AWS_PASS")
BUCKET_NAME = os.getenv("BUCKET_NAME")
POSTGREST_TOKEN = os.getenv("POSTGREST_TOKEN")
POSTGREST_ENDPOINT = os.getenv("POSTGREST_ENDPOINT")
def handle_year_month_args(year, month, lastmonth, aws_s3_client):
"""
Parameters
----------
year : Int
Argument provided value for year.
month : Int
Argument provided value for month.
lastmonth : Bool
Argument that determines if the previous month should also be queried.
aws_s3_client : boto3 client object
For sending on to get_csv_list
Returns
-------
csv_file_list : List
A list of the csv files to be downloaded and upsert to Postgres.
"""
# If args are missing, default to current month and/or year
if not year:
f_year = datetime.now().year
else:
f_year = year
if not month:
f_month = datetime.now().month
else:
f_month = month
csv_file_list = get_csv_list(f_year, f_month, aws_s3_client)
if not month and not year:
if lastmonth == True:
prev_month = f_month - 1
prev_year = f_year
if prev_month == 0:
prev_year = prev_year - 1
prev_month = 12
logger.debug(
f"Getting data from folders: {prev_month}-{prev_year} and {f_month}-{f_year}"
)
prev_list = get_csv_list(prev_year, prev_month, aws_s3_client)
csv_file_list.extend(prev_list)
else:
logger.debug(f"Getting data from folders: {f_month}-{f_year}")
csv_file_list = [f for f in csv_file_list if f.endswith(".csv")]
return csv_file_list
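# Example of the month wrap-around (hypothetical run date): calling this in January 2023
# with --lastmonth and no --year/--month lists both the 1-2023 and 12-2022 folders.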
def get_file_name(file_key):
"""
Returns the name of an email file based on the full s3 file path
:param file_key: the file path
:return: string
"""
return ntpath.basename(file_key)
def get_csv_list(year, month, client):
"""
Returns an array of files parsed into an actual array (as opposed to an object)
:return: array of strings
"""
csv_file_list = []
pending_csv_list = aws_list_files(year, month, client)
for csv_file in pending_csv_list:
csv_file_list.append(csv_file)
# Remove the first item, it is not needed
# since it is just the name of the folder
csv_file_list.pop(0)
# Finally return the final list
return csv_file_list
def aws_list_files(year, month, client):
"""
Returns a list of email files.
:return: object
"""
response = client.list_objects(
Bucket=BUCKET_NAME,
Prefix="meters/prod/transaction_history/" + str(year) + "/" + str(month),
)
for content in response.get("Contents", []):
yield content.get("Key")
def get_invoice_id(banking_id, terminal_code):
"""Create the Inovice ID which is a concatention of the banking ID and device ID
Args:
banking_id (int): whatever this is
terminal_code (int): whatever this is
Returns:
int: The formatted invoice ID
"""
if pd.isna(banking_id) and terminal_code:
return -1
if banking_id == 0:
return -1
# get last 4 digits of terminal code
terminal_code = str(terminal_code)[-4:]
# zero-pad bank ID to 6 digits
banking_id = f"{banking_id:06d}"
invoice_id = f"{terminal_code}{banking_id}"
return int(invoice_id)
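# Worked example (hypothetical values):
#     get_invoice_id(123, 987654321)
#     -> terminal suffix '4321', zero-padded banking id '000123' -> 4321000123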
def postgres_datetime(time_field):
"""Changes the existing datetime field in S3 to a format that can be stored by postgres.
First parses the string time as datetime type then outputs as string.
Args:
time_field (string): Datetime field used by smartfolio.
Sent in a lambda function from a pandas series.
Returns:
        output (string): Formatted datetime field that is compatible with postgres
"""
output = pd.to_datetime(
time_field, format="%Y-%m-%d %H:%M:%S", infer_datetime_format=True
)
return str(output)
def transform(smartfolio):
"""Formats and adds/drops columns of a dataframe from smartfolio to conform
to postgres DB schema.
Args:
smartfolio (pandas dataframe): The unformatted data stored in S3 from smartfolio.
Returns:
smartfolio (pandas dataframe): Formatted dataframe that works with DB schema.
"""
# Drop dupes as there are some overlapping dates in the smartfolio CSVs, keep latest
smartfolio = smartfolio.drop_duplicates(subset=["SYSTEM_ID"], keep="last")
## Column wrangling
smartfolio["id"] = smartfolio["SYSTEM_ID"]
smartfolio["Banking Id"] = smartfolio["CARD_TRANS_ID"].astype("Int64")
smartfolio["Terminal Code"] = smartfolio["METER_CODE"]
smartfolio["invoice_id"] = smartfolio.apply(
lambda x: get_invoice_id(x["Banking Id"], x["Terminal Code"]), axis=1
)
# Date/time wrangling
smartfolio["duration_min"] = smartfolio["TOTAL_DURATION"] / 60
smartfolio["datetime"] = smartfolio.apply(
lambda x: postgres_datetime(x["SERVER_DATE"]), axis=1
)
smartfolio["start_time"] = smartfolio.apply(
lambda x: postgres_datetime(x["METER_DATE"]), axis=1
)
smartfolio["end_time"] = smartfolio.apply(
lambda x: postgres_datetime(x["END_DATE"]), axis=1
)
smartfolio["timestamp"] = smartfolio["datetime"]
# Payment type column cleanup
smartfolio.loc[smartfolio["PAYMENT_MEAN"] == "CARD_1_0", ["PAYMENT_MEAN"]] = "CARD"
smartfolio.loc[
smartfolio["PAYMENT_MEAN"] == "CARD_0_116", ["PAYMENT_MEAN"]
] = "CARD"
# Renaming columns for schema
smartfolio = smartfolio.rename(
columns={
"PAYMENT_MEAN": "payment_method",
"Terminal Code": "meter_id",
"AMOUNT": "amount",
"TRANSACTION_TYPE": "transaction_type",
}
)
# Data types for schema
smartfolio["invoice_id"] = smartfolio["invoice_id"].astype(int)
smartfolio["meter_id"] = smartfolio["meter_id"].astype(int)
smartfolio["end_time"] = smartfolio["end_time"].replace("NaT", None)
# This handles the case where the duration field is null.
# Replace with the actual duration between start/end times.
smartfolio["start_datetime"] = pd.to_datetime(
smartfolio["start_time"], format="%Y-%m-%d %H:%M:%S", infer_datetime_format=True
)
smartfolio["end_datetime"] = pd.to_datetime(
smartfolio["end_time"], format="%Y-%m-%d %H:%M:%S", infer_datetime_format=True
)
smartfolio["timedelta"] = (
smartfolio["end_datetime"] - smartfolio["start_datetime"]
).dt.total_seconds()
smartfolio["timedelta"] = smartfolio["timedelta"] / 60
smartfolio.loc[smartfolio["duration_min"].isna(), "duration_min"] = smartfolio.loc[
smartfolio["duration_min"].isna(), "timedelta"
]
# Only subset of columns needed for schema
smartfolio = smartfolio[
[
"id",
"invoice_id",
"transaction_type",
"payment_method",
"meter_id",
"timestamp",
"duration_min",
"start_time",
"end_time",
"amount",
]
]
return smartfolio
def to_postgres(smartfolio):
"""Uploads the formatted dataframe to two different postgres DBs.
flowbird_transactions_raw - just for smartfolio aka flowbird data
transactions - a combined parking DB which will also include data from passport
Args:
smartfolio (pandas dataframe): Formatted dataframe that works with DB schema.
Returns:
None
"""
payload = smartfolio.to_dict(orient="records")
client = Postgrest(
POSTGREST_ENDPOINT,
token=POSTGREST_TOKEN,
headers={"Prefer": "return=representation"},
)
res = client.upsert(resource="flowbird_transactions_raw", data=payload)
# Send data to the combined transactions dataset
smartfolio = smartfolio[
[
"id",
"payment_method",
"meter_id",
"duration_min",
"start_time",
"end_time",
"amount",
]
]
smartfolio.loc[:, "source"] = "Parking Meters"
payload = smartfolio.to_dict(orient="records")
res = client.upsert(resource="transactions", data=payload)
def main(args):
aws_s3_client = boto3.client(
"s3", aws_access_key_id=AWS_ACCESS_ID, aws_secret_access_key=AWS_PASS,
)
csv_file_list = handle_year_month_args(
args.year, args.month, args.lastmonth, aws_s3_client
)
# Go through all files and combine into a dataframe
for csv_f in csv_file_list:
# Parse the file
response = aws_s3_client.get_object(Bucket=BUCKET_NAME, Key=csv_f)
df = pd.read_csv(response.get("Body"))
logger.debug(f"Loaded CSV File: {csv_f}")
df = transform(df)
to_postgres(df)
# CLI arguments definition
parser = argparse.ArgumentParser()
parser.add_argument(
    "--year", type=int, help="Year of folder to select, defaults to current year",
)
parser.add_argument(
    "--month", type=int, help="Month of folder to select, defaults to current month",
)
parser.add_argument(
    "--lastmonth",
    type=bool,
    help="Also download from the previous month's folder in addition to the current one.",
    default=False,
)
args = parser.parse_args()
logger = utils.get_logger(__file__, level=logging.DEBUG)
main(args)
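# Example invocations (a sketch; the script name is a placeholder, and it assumes
# the AWS and Postgrest credentials referenced above are configured):
#   python flowbird_transactions.py                      # current month folder
#   python flowbird_transactions.py --year 2021 --month 3
#   python flowbird_transactions.py --lastmonth True     # current and previous month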
|
StarcoderdataPython
|
1745305
|
"""
Given an unsorted array of integers, find the length of the longest consecutive elements sequence.
For example,
Given [100, 4, 200, 1, 3, 2],
The longest consecutive elements sequence is [1, 2, 3, 4]. Return its length: 4.
Your algorithm should run in O(n) complexity.
"""
class Solution:
# @param num, a list of integer
# @return an integer
def longestConsecutive(self, num):
num_dict = {}
for i in num:
if i not in num_dict:
num_dict[i] = True
ret = 1
for i in num:
if i not in num_dict:
continue
length = 1
j = i
while j + 1 in num_dict:
length += 1
num_dict.pop(j+1, None)
j += 1
j = i
while j - 1 in num_dict:
length += 1
num_dict.pop(j-1, None)
j -= 1
ret = max(ret, length)
num_dict.pop(i, None)
return ret
# Other approaches are not O(n) solutions
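# Hedged usage sketch (not part of the original solution): reproduces the example
# from the docstring above.
if __name__ == "__main__":
    assert Solution().longestConsecutive([100, 4, 200, 1, 3, 2]) == 4
    assert Solution().longestConsecutive([7]) == 1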
|
StarcoderdataPython
|
1771927
|
import pymysql
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
webapp_python_flask_sqlalchemy_mysql_app = Flask(__name__, instance_relative_config=True)
webapp_python_flask_sqlalchemy_mysql_app.config.from_object("config")
webapp_python_flask_sqlalchemy_mysql_app.config.from_pyfile("config.py")
db = SQLAlchemy(webapp_python_flask_sqlalchemy_mysql_app)
from models import HelloWorld
@webapp_python_flask_sqlalchemy_mysql_app.route("/")
def wsgi_script_alias_root():
hello_world = HelloWorld.query.one()
return str(hello_world.hello_world)
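# Hedged run sketch (not part of the original file): assumes config.py supplies a
# valid SQLAlchemy database URI and that the hello_world table already exists.
if __name__ == "__main__":
    webapp_python_flask_sqlalchemy_mysql_app.run(debug=True)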
|
StarcoderdataPython
|
3318273
|
import tensorflow as tf
validation_dir="fooval/val" #"/datasets/ImageNet/ILSVRC/Data/CLS-LOC/val" #"val"
BATCH_SIZE = 32
IMG_SIZE = (224,224)
#model = tf.keras.applications.mobilenet
#pretrained = tf.keras.applications.MobileNet
model = tf.keras.applications.mobilenet_v2
pretrained = tf.keras.applications.MobileNetV2
#model = tf.keras.applications.mobilenet_v3
#pretrained = tf.keras.applications.MobileNetV3Small
#pretrained = tf.keras.applications.MobileNetV3Large
ds = tf.keras.preprocessing.image_dataset_from_directory(
validation_dir,
shuffle=False,
batch_size=BATCH_SIZE,
image_size=IMG_SIZE)
def resize_with_crop(image, label):
i = image
i = tf.cast(i, tf.float32)
i = tf.image.resize_with_crop_or_pad(i, 224, 224)
i = model.preprocess_input(i)
return (i, label)
# Preprocess the images
ds = ds.map(resize_with_crop)
# Compile the model
pretrained_model = pretrained(include_top=True, weights='imagenet')
pretrained_model.trainable = False
pretrained_model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
decode_predictions = model.decode_predictions
# Print Accuracy
result = pretrained_model.evaluate(ds)
print(dict(zip(pretrained_model.metrics_names, result)))
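# Optional extension (a sketch, not part of the original script): ImageNet results
# are usually also reported as top-5 accuracy, which Keras exposes as a metric.
# Uncommenting the lines below re-compiles and re-evaluates with both metrics.
# pretrained_model.compile(
#     optimizer='adam',
#     loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
#     metrics=['accuracy',
#              tf.keras.metrics.SparseTopKCategoricalAccuracy(k=5, name='top5_accuracy')])
# result = pretrained_model.evaluate(ds)
# print(dict(zip(pretrained_model.metrics_names, result)))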
|
StarcoderdataPython
|
3238572
|
"""
Read file into texts and calls.
It's ok if you don't understand how to read files
"""
import csv
with open('texts.csv', 'r') as f:
reader = csv.reader(f)
texts = list(reader)
with open('calls.csv', 'r') as f:
reader = csv.reader(f)
calls = list(reader)
"""
TASK 2: Which telephone number spent the longest time on the phone
during the period? Don't forget that time spent answering a call is
also time spent on the phone.
Print a message:
"<telephone number> spent the longest time, <total time> seconds, on the phone during
September 2016.".
"""
totalTimeLog = {}
for call in calls:
    if call[0] not in totalTimeLog:
        totalTimeLog[call[0]] = int(call[3])
    else:
        totalTimeLog[call[0]] += int(call[3])
    if call[1] not in totalTimeLog:
        totalTimeLog[call[1]] = int(call[3])
    else:
        totalTimeLog[call[1]] += int(call[3])
longestCaller = max(totalTimeLog, key=totalTimeLog.get)
print(longestCaller + " spent the longest time, " + str(totalTimeLog[longestCaller]) + " seconds, on the phone during September 2016.")
|
StarcoderdataPython
|
4810341
|
<reponame>lidofinance/depositor-bot<filename>scripts/utils/metrics.py<gh_stars>1-10
from prometheus_client.metrics import Gauge, Counter
DEPOSITOR_PREFIX = 'depositor_bot_'
BUILD_INFO = Gauge(f'{DEPOSITOR_PREFIX}build_info', 'Build info', [
'name',
'network',
'max_gas_fee',
'contract_gas_limit',
'gas_fee_percentile_1',
'gas_fee_percentile_days_history_1',
'gas_fee_percentile_2',
'gas_fee_percentile_days_history_2',
'gas_priority_fee_percentile',
'min_priority_fee',
'max_priority_fee',
'kafka_topic',
'account_address',
'create_transactions',
])
GAS_FEE = Gauge(f'{DEPOSITOR_PREFIX}gas_fee', 'Gas fee', ['type'])
DEPOSIT_FAILURE = Counter(f'{DEPOSITOR_PREFIX}deposit_failure', 'Deposit failure')
SUCCESS_DEPOSIT = Counter(f'{DEPOSITOR_PREFIX}deposit_success', 'Deposit done')
ACCOUNT_BALANCE = Gauge(f'{DEPOSITOR_PREFIX}account_balance', 'Account balance')
KAFKA_DEPOSIT_MESSAGES = Gauge(f'{DEPOSITOR_PREFIX}kafka_deposit_messages', 'Guardians deposit messages', ['address', 'version'])
KAFKA_PAUSE_MESSAGES = Gauge(f'{DEPOSITOR_PREFIX}kafka_pause_messages', 'Guardians pause messages', ['address', 'version'])
KAFKA_PING_MESSAGES = Gauge(f'{DEPOSITOR_PREFIX}kafka_ping_messages', 'Guardians ping messages', ['address', 'version'])
CURRENT_QUORUM_SIZE = Gauge(f'{DEPOSITOR_PREFIX}quorum_size', 'Current quorum size')
BUFFERED_ETHER = Gauge(f'{DEPOSITOR_PREFIX}buffered_ether', 'Buffered ether')
OPERATORS_FREE_KEYS = Gauge(f'{DEPOSITOR_PREFIX}operator_free_keys', 'Has free keys')
CREATING_TRANSACTIONS = Gauge(f'{DEPOSITOR_PREFIX}creating_transactions', 'Creating transactions', ['bot'])
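# Hedged usage sketch (not part of the original module): expose the metrics over
# HTTP and record a few sample values. Port 8000 is an arbitrary choice.
if __name__ == '__main__':
    from prometheus_client import start_http_server

    start_http_server(8000)  # metrics become available at http://localhost:8000/metrics
    ACCOUNT_BALANCE.set(32.5)
    SUCCESS_DEPOSIT.inc()
    GAS_FEE.labels(type='base_fee').set(12_000_000_000)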
|
StarcoderdataPython
|
1655018
|
<filename>tests/home/test_home.py
from unittest import TestCase
import sys
sys.path.insert(1, '../../apps/models/user_package/user')
sys.path.insert(2, '../../apps/models/user_package/schema')
from user_model import User
class HomeTests(TestCase):
def test_user(self):
user = User(email='<EMAIL>', password='<PASSWORD>', full_name='Teste OSchema', cpf_cnpj="00609568019")
# schema = user_schema.UserSchema()
# result = schema.dump(user)
print(user)
|
StarcoderdataPython
|
3322374
|
from django.urls import path, include
from . import views
from mainapp.main import views as main_views
from mainapp.video import views as video_views
main_patterns = [
path('',main_views.main),
path('signup/', main_views.signup, name='signup'),
path('signin/', main_views.signin, name='signin'),
]
video_patterns = [
path('', video_views.swipe),
path('show/', video_views.show, name='show'),
path('detail/<int:id>', video_views.detail, name='detail'),
path('like/', video_views.like, name='like'),
]
urlpatterns = [
path('main/', include(main_patterns)),
path('video/', include(video_patterns)),
]
|
StarcoderdataPython
|
3386676
|
<gh_stars>0
def get_departments():
departments = {
"eee": "10056",
"computer_science": "10024",
"mechanical": "10141",
"civil": "10110",
"mechatronics": "10152",
"biomedical": "10032",
"industrial": "10059"
}
return departments
|
StarcoderdataPython
|
3247399
|
import abc
import socket
class ProxyError(Exception):
pass
class Proxy(object):
"""
The Proxy class serves as a template for the implementation of specific proxy's. The Proxy class is an abstract
class, so it can not function on its own. The HttpProxy class is an example which is based on this class.
"""
def __init__(self, proxy_address):
"""
Constructor. Normally called from the child.
:param proxy_address: The address of the proxy in the form of a Destination object.
"""
self._proxy_address = proxy_address
# Begin helper methods. These methods automate repeating tasks. The methods are shared with all classes that
# extend from this class. Helper methods are usually static.
@staticmethod
def _init_connection(ip, port):
"""
Method for initiating a standard ipv4 streaming socket.
:param ip: The ip of the target
:param port: The port of the target
:return: A new socket object
"""
try:
my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
my_socket.connect((ip, port))
except socket.error as error:
raise ProxyError("Failed to connect to " + ip + ":" + str(port) + ". " + str(error))
return my_socket
@staticmethod
def _read_until_empty(socket_obj, buffer_size=1024):
"""
Method for reading from a socket until the buffer is empty.
:param socket_obj: The socket object to read from
:param buffer_size: The size of the buffer which is used to read from the socket (default= 1024)
:return: The received bytes
"""
if socket_obj:
response = b""
while True:
data = socket_obj.recv(buffer_size)
if not data:
break
response += data
return response
else:
raise ValueError("socket_obj can not be empty")
# End helper methods.
    # Begin abstract methods. These methods have to be implemented by the class which extends the Proxy class.
    # Children of this class can make use of the helper methods to make the implementation easier.
def connect(self, destination):
"""
Connect to the given destination.
:param destination: The target destination (Destination object)
"""
self.close()
self._connect(destination)
@abc.abstractmethod
def _connect(self, destination):
return
@abc.abstractmethod
def is_connected(self):
"""
Checks if the proxy is connected to a destination.
:return: True if the proxy is connected to a destination or False if the proxy is not connected.
"""
return
def send(self, payload):
"""
Send the given payload to the destination (use after the connect method).
:param payload: The payload to send to the target destination
"""
if self.is_connected():
self._send(payload)
else:
raise ProxyError("First call the connect method before sending anything")
@abc.abstractmethod
def _send(self, payload):
return
def receive(self):
"""
Receive the answer from the other side of the connection.
"""
if self.is_connected():
return self._receive()
else:
raise ProxyError("First call the connect method before receiving anything")
@abc.abstractmethod
def _receive(self):
return
def close(self):
"""
Close the connection. You can connect to another destination by using the connect method.
"""
if self.is_connected():
self._close()
@abc.abstractmethod
def _close(self):
return
@abc.abstractmethod
def copy(self):
"""
Creates a new proxy object with the same configuration and returns it.
:return: A copy of the proxy object
"""
return
# End abstract methods.
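# A minimal, hypothetical concrete subclass sketch (not part of the original
# module). It assumes the Destination object exposes `ip` and `port`
# attributes; the real HttpProxy referenced in the docstring may differ.
class PlainTcpProxy(Proxy):
    """Forwards raw bytes to the destination over a plain TCP socket."""

    def __init__(self, proxy_address=None):
        Proxy.__init__(self, proxy_address)
        self._socket = None

    def _connect(self, destination):
        # Reuse the shared helper to open the socket.
        self._socket = self._init_connection(destination.ip, destination.port)

    def is_connected(self):
        return self._socket is not None

    def _send(self, payload):
        self._socket.sendall(payload)

    def _receive(self):
        return self._read_until_empty(self._socket)

    def _close(self):
        self._socket.close()
        self._socket = None

    def copy(self):
        return PlainTcpProxy(self._proxy_address)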
|
StarcoderdataPython
|
90752
|
<reponame>sytelus/axformer
from axformer import trainer
def main():
trainer.train()
|
StarcoderdataPython
|
3393049
|
"""
Faça um programa que mostre na tela uma contagem regressiva para o estouro de fogos de artifício,
indo de 10 até 0, com uma pausa de 1 segundo entre eles.
"""
from time import sleep
import emoji
for c in range(10, 0, -1):
print(c)
sleep(1)
if c == 1:
print(0)
print(emoji.emojize(":sparkles: :tada: \033[1;31mFOGOS!\033[m :tada: :sparkles:", use_aliases=True))
|
StarcoderdataPython
|
3344332
|
""" --- Brackets --- Elementary
You are given an expression with numbers, brackets and operators.
For this task only the brackets matter. Brackets come in three flavors:
"{}" "()" or "[]". Brackets are used to determine scope or to restrict
some expression. If a bracket is open, then it must be closed with
a closing bracket of the same type. The scope of a bracket must
not intersected by another bracket. In this task you should make
a decision, whether to correct an expression or not based
on the brackets. Do not worry about operators and operands.
Input: An expression with different of types brackets
as a string (unicode).
Output: A verdict on the correctness of the expression
in boolean (True or False).
How it is used: When you write code or complex expressions in
a mathematical package, you can get a huge headache
when it comes to excess or missing brackets.
This concept can be useful for your own IDE.
Precondition: There are only brackets ("{}" "()" or "[]"),
digits or operators ("+" "-" "*" "/").
0 < len(expression) < 103
"""
BRACKETS = ('{', '(', '[', ']', ')', '}')
def my_solution(expression):
def _reverse_bracket(br):
return ')' if br == '(' else ']' if br == '[' else '}'
brackets = [ch for ch in expression if ch in BRACKETS]
stack = []
for bracket in brackets:
# Open bracket
if bracket in BRACKETS[:3]:
stack.append(bracket)
# Close bracket
if bracket in BRACKETS[3:]:
if not stack:
return False
if bracket == _reverse_bracket(stack[-1]):
stack = stack[:-1]
else:
return False
return not stack
def veky_solution(data):
stack = ['']
brackets = {'(': ')', '[': ']', '{': '}'}
for c in data:
if c in brackets:
stack.append(brackets[c])
elif c in brackets.values() and c != stack.pop():
return False
return stack == [""]
|
StarcoderdataPython
|
3236082
|
<reponame>IndigoPurple/EFENet<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Portions Copyright (c) 2014 CiiNOW Inc.
Written by <NAME> <<EMAIL>>, <<EMAIL>>
2014-03-03 Ported from matlab to python/numpy/scipy
2014-03-04 Added utility functions to read/compare images and video
"""
"""
-----------COPYRIGHT NOTICE STARTS WITH THIS LINE------------
Copyright (c) 2005 The University of Texas at Austin
All rights reserved.
Permission is hereby granted, without written agreement and without license or royalty fees, to use, copy,
modify, and distribute this code (the source files) and its documentation for
any purpose, provided that the copyright notice in its entirety appear in all copies of this code, and the
original source of this code, Laboratory for Image and Video Engineering (LIVE, http://live.ece.utexas.edu)
at the University of Texas at Austin (UT Austin,
http://www.utexas.edu), is acknowledged in any publication that reports research using this code. The research
is to be cited in the bibliography as:
<NAME> and <NAME>, "Image Information and Visual Quality", IEEE Transactions on
Image Processing, (to appear).
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT AUSTIN BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL,
OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF THIS DATABASE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF TEXAS
AT AUSTIN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
THE UNIVERSITY OF TEXAS AT AUSTIN SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE DATABASE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THE UNIVERSITY OF TEXAS AT AUSTIN HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-----------COPYRIGHT NOTICE ENDS WITH THIS LINE------------
This software release consists of a MULTISCALE PIXEL DOMAIN, SCALAR GSM implementation of the algorithm described in the paper:
<NAME> and <NAME>, "Image Information and Visual Quality"., IEEE Transactions on Image Processing, (to appear).
Download manuscript draft from http://live.ece.utexas.edu in the Publications link.
THE PIXEL DOMAIN ALGORITHM IS NOT DESCRIBED IN THE PAPER. THIS IS A COMPUTATIONALLY SIMPLER
DERIVATIVE OF THE ALGORITHM PRESENTED IN THE PAPER
Input : (1) img1: The reference image as a matrix
(2) img2: The distorted image (order is important)
Output: (1) VIF the visual information fidelity measure between the two images
Default Usage:
Given 2 test images img1 and img2, whose dynamic range is 0-255
vif = vifvec(img1, img2);
Advanced Usage:
Users may want to modify the parameters in the code.
(1) Modify sigma_nsq to find tune for your image dataset.
Email comments and bug reports to <EMAIL>
"""
import numpy
import scipy.signal
import scipy.ndimage
def vifp_mscale(ref, dist):
sigma_nsq = 2
eps = 1e-10
num = 0.0
den = 0.0
for scale in range(1, 5):
N = 2 ** (4 - scale + 1) + 1
sd = N / 5.0
if (scale > 1):
ref = scipy.ndimage.gaussian_filter(ref, sd)
dist = scipy.ndimage.gaussian_filter(dist, sd)
ref = ref[::2, ::2]
dist = dist[::2, ::2]
mu1 = scipy.ndimage.gaussian_filter(ref, sd)
mu2 = scipy.ndimage.gaussian_filter(dist, sd)
mu1_sq = mu1 * mu1
mu2_sq = mu2 * mu2
mu1_mu2 = mu1 * mu2
sigma1_sq = scipy.ndimage.gaussian_filter(ref * ref, sd) - mu1_sq
sigma2_sq = scipy.ndimage.gaussian_filter(dist * dist, sd) - mu2_sq
sigma12 = scipy.ndimage.gaussian_filter(ref * dist, sd) - mu1_mu2
sigma1_sq[sigma1_sq < 0] = 0
sigma2_sq[sigma2_sq < 0] = 0
g = sigma12 / (sigma1_sq + eps)
sv_sq = sigma2_sq - g * sigma12
g[sigma1_sq < eps] = 0
sv_sq[sigma1_sq < eps] = sigma2_sq[sigma1_sq < eps]
sigma1_sq[sigma1_sq < eps] = 0
g[sigma2_sq < eps] = 0
sv_sq[sigma2_sq < eps] = 0
sv_sq[g < 0] = sigma2_sq[g < 0]
g[g < 0] = 0
sv_sq[sv_sq <= eps] = eps
num += numpy.sum(numpy.log10(1 + g * g * sigma1_sq / (sv_sq + sigma_nsq)))
den += numpy.sum(numpy.log10(1 + sigma1_sq / sigma_nsq))
vifp = num / den
return vifp
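# Hedged usage sketch (not part of the original module): compute VIFp between a
# synthetic reference image and a blurred copy. Identical images give a value
# close to 1.0; distortion lowers it.
if __name__ == '__main__':
    rng = numpy.random.RandomState(0)
    ref = rng.uniform(0, 255, (128, 128))
    dist = scipy.ndimage.gaussian_filter(ref, 1.5)
    print('VIFp(ref, ref)     =', vifp_mscale(ref, ref))
    print('VIFp(ref, blurred) =', vifp_mscale(ref, dist))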
|
StarcoderdataPython
|
3323807
|
from enum import Enum
from typing import Any, Union
from .connection import Connection
class HTTPRequestTypes(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
PATCH = "PATCH"
HEAD = "HEAD"
class HTTPConnection(Connection):
"""
A Connection that utilizes HTTP
HTTP is useful for communicating with HTTP Servers over the web, although slower than other more
low level connection types
"""
def __init__(self, session, default_url=None, headers=None, auth=None):
import requests
self.session = session or requests.Session()
        if headers:
            self.session.headers.update(headers)
        if auth:
            self.auth = auth
            self.session.auth = auth
self.url = default_url
def send_request(self, data: Any, url: str, method: HTTPRequestTypes, **kwargs):
"""
send_request is the function used to send a request using HTTP protocol.
This can be used to send request to servers running remotely in the internet or to an HTTP server in the LAN.
Note: This is the inner function used by the Connection object to send and receive.
The functions send, receive and interval_receive should be used instead!
:param data: The data to be passed as the request's payload
:param url: the url to send the request to
:param method: the HTTP method to be used
:param kwargs: any other parameters to requests.request
:return: the response received for the request
"""
import requests
params = {"data": data}
if "params" in kwargs.keys():
params = {**kwargs["params"], "data": data}
url = url or self.url
        response = self.session.request(method.value, url, params=params, **kwargs)
response.raise_for_status()
return response
def send(self, data: Any, url: str = None, method=HTTPRequestTypes.POST, **kwargs):
"""
Sends an HTTP requests, should be used to only send data and not receive any data
as it represent the logical action of sending information
:param data: the data to be sent, passed
:param url: the url to send it to
:param method: the request method (GET, POST, DELETE etc.)
:param kwargs: any other parameter to be passed the request ()
:return:
"""
return self.send_request(data, url, method, **kwargs)
def receive(self, data: Any = None, url: str = None, method: HTTPRequestTypes = HTTPRequestTypes.GET,
interval: Union[float, bool] = False, **kwargs):
"""
Receive for HTTPConnection is a bit different compared to other Connection in that it sends a request
and returns a response which is the main usage, in addition the interval parameter allows for high
interval sampling (for MultiVision for example) without sending many requests
:param url: the url to send to, overrides self.url
:param data: any data to pass with the requests
:param method: determines what HTTP method should the request use (GET, POST, PUT etc.)
:param interval:
:param kwargs: any other parameters passed to Request.request
:return:
"""
if interval:
return self.interval_receive(data, url, method, **kwargs)
else:
            return self.send_request(data, url, method, **kwargs)
def interval_receive(self, data: Any, url: str = None, method=HTTPRequestTypes.GET, **kwargs):
pass
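# Hedged usage sketch (not part of the original module): the endpoint URL is a
# placeholder, and raise_for_status() means a reachable server is assumed.
if __name__ == "__main__":
    import requests

    conn = HTTPConnection(
        session=requests.Session(),
        default_url="http://localhost:8000/api",  # hypothetical endpoint
        headers={"Accept": "application/json"},
    )
    response = conn.send(data="ping")
    print(response.status_code)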
|
StarcoderdataPython
|
1630708
|
"""
Chess board
No computer player yet
Sucks in other ways too
TO DO: look over http://home.hccnet.nl/h.g.muller/max-src2.html
"""
## b = InitialChessBoard()
## print str(b)
#. rnbqkbnr
#. pppppppp
#.
#.
#.
#.
#. PPPPPPPP
#. RNBQKBNR
## pw = HumanPlayer(white)
## pb = HumanPlayer(black)
## b.outcome
## ' '.join(sorted(map(str, b.get_moves())))
#. 'a2-a3 a2-a4 b1-a3 b1-c3 b2-b3 b2-b4 c2-c3 c2-c4 d2-d3 d2-d4 e2-e3 e2-e4 f2-f3 f2-f4 g1-f3 g1-h3 g2-g3 g2-g4 h2-h3 h2-h4 resign'
## m = b.parse_move('resign')
## b1 = m.update(b)
## b1.outcome
#. 'black'
def main():
print "(Moves look like 'e2-e3')"
play_chess(HumanPlayer, HumanPlayer)
def play_chess(white_strategy, black_strategy):
return play(InitialChessBoard(), [white_strategy, black_strategy])
def play(board, strategies):
players = [strategy(side)
for strategy, side in zip(strategies, board.get_sides())]
while board.get_outcome() is None:
board = board.play_turn(players)
for player in players:
player.on_game_over(board)
class HumanPlayer:
def __init__(self, side):
self.side = side
def pick_move(self, board):
board.show()
while True:
string = raw_input('%s, your move? ' % self.side.capitalize())
try:
move = board.parse_move(string)
except MoveIllegal:
print 'Illegal move.'
else:
return move
def on_game_over(self, board):
board.show()
if board.get_outcome() is None:
pass
elif board.get_outcome() == self.side:
print '%s, you win!' % self.side.capitalize()
elif board.get_outcome() == 'draw':
print 'You draw.'
else:
print '%s, you lose!' % self.side.capitalize()
def InitialChessBoard():
squares = ['----------',
'-rnbqkbnr-',
'-pppppppp-',
               '-        -',
               '-        -',
               '-        -',
               '-        -',
'-PPPPPPPP-',
'-RNBQKBNR-',
'----------',]
return ChessBoard(white, squares, (False, False), None)
class MoveIllegal(Exception):
pass
class ChessBoard:
def __init__(self, mover, squares, castled, outcome):
self.mover = mover
self.squares = squares
self.castled = castled
self.outcome = outcome
def __str__(self):
return '\n'.join(line[1:-1] for line in self.squares[1:-1])
def has_castled(self, player):
return self.castled[player == black]
def get_outcome(self):
"Return None, 'draw', black, or white (meaning the winner)."
return self.outcome
def resign(self):
return ChessBoard(opponent(self.mover),
self.squares,
self.castled,
opponent(self.mover))
def move_piece(self, (r0, c0), (r1, c1)):
squares = list(map(list, self.squares))
piece = squares[r0][c0]
squares[r0][c0] = ' '
squares[r1][c1] = piece
return ChessBoard(opponent(self.mover),
list(map(''.join, squares)),
self.castled,
None) # XXX check for checkmate or draw
def show(self):
print self
def get_sides(self):
return (white, black)
def play_turn(self, (white_player, black_player)):
player = white_player if self.mover == white else black_player
move = player.pick_move(self)
if move in self.get_moves():
return move.update(self)
raise Exception("Bad move")
def parse_move(self, string):
for move in self.get_moves():
if move.matches(string):
return move
raise MoveIllegal()
def get_moves(self):
return [ResignMove()] + self.get_piece_moves()
def get_piece_moves(self):
return sum(map(self.moves_from, self.army(self.mover)), [])
def army(self, player):
for r, row in enumerate(self.squares):
for c, piece in enumerate(row):
if piece.isalpha() and piece.isupper() == (player == white):
yield r, c
def moves_from(self, pos):
return list(self.gen_moves_from(pos))
def gen_moves_from(self, (r, c)):
piece = self.squares[r][c]
piece, white = piece.upper(), piece.isupper()
def is_takeable(r1, c1):
return is_empty(r1, c1) or has_opponent(r1, c1)
def is_empty(r1, c1):
return self.squares[r1][c1] == ' '
def has_opponent(r1, c1):
there = self.squares[r1][c1]
return there.isalpha() and there.isupper() != white
def move_to(r1, c1):
return PieceMove((r, c), (r1, c1))
def move_freely(dirs):
for dr, dc in dirs:
for i in range(1, 9):
if is_empty(r+dr*i, c+dc*i):
yield move_to(r+dr*i, c+dc*i)
else:
if has_opponent(r+dr*i, c+dc*i):
yield move_to(r+dr*i, c+dc*i)
break
if piece in ' -':
pass
elif piece == 'P':
# TODO: pawn promotion
# TODO: en passant
forward = -1 if white else 1
if is_empty(r+forward, c):
yield move_to(r+forward, c)
if r == (7 if white else 2): # initial 2 steps
if is_empty(r+forward*2, c): yield move_to(r+forward*2, c)
if has_opponent(r+forward, c-1): yield move_to(r+forward, c-1)
if has_opponent(r+forward, c+1): yield move_to(r+forward, c+1)
elif piece == 'K':
# TODO castling
# TODO forbid moving into check
# (and this can apply to moves of other pieces)
for dr, dc in queen_dirs:
if is_takeable(r+dr, c+dc):
yield move_to(r+dr, c+dc)
elif piece == 'Q':
for move in move_freely(queen_dirs): yield move
elif piece == 'R':
for move in move_freely(rook_dirs): yield move
elif piece == 'B':
for move in move_freely(bishop_dirs): yield move
elif piece == 'N':
for dr, dc in knight_jumps:
if 1 <= r+dr <= 8 and 1 <= c+dc <= 8:
if is_takeable(r+dr, c+dc):
yield move_to(r+dr, c+dc)
else:
assert False
rook_dirs = [( 0, 1), ( 0,-1), ( 1, 0), (-1, 0)]
bishop_dirs = [(-1,-1), (-1, 1), ( 1,-1), ( 1, 1)]
queen_dirs = rook_dirs + bishop_dirs
knight_jumps = [( 2, 1), ( 2,-1), ( 1, 2), ( 1,-2),
(-2, 1), (-2,-1), (-1, 2), (-1,-2)]
white, black = 'white', 'black'
def opponent(side):
return black if side == white else white
class ResignMove:
def __eq__(self, other):
return isinstance(other, ResignMove)
def update(self, board):
return board.resign()
def matches(self, string):
return string.lower() == str(self)
def __str__(self):
return 'resign'
class PieceMove:
def __init__(self, from_pos, to_pos):
self.from_pos = from_pos
self.to_pos = to_pos
def __eq__(self, other):
return (isinstance(other, PieceMove)
and self.from_pos == other.from_pos
and self.to_pos == other.to_pos)
def update(self, board):
return board.move_piece(self.from_pos, self.to_pos)
def matches(self, string):
return string.lower() == str(self)
def __str__(self):
# XXX 'a' is top of board for Black?
fr, fc = self.from_pos
tr, tc = self.to_pos
return '%s%d-%s%d' % ('abcdefgh'[fc-1], 9-fr,
'abcdefgh'[tc-1], 9-tr)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1791236
|
<reponame>rochester-rcl/islandora-import-scripts<filename>ur/mods.py
#!/usr/bin/python
import xml.etree.ElementTree as ET
# #########################################################
# Represents all the MODS metadata classes for import
# #########################################################
class RecordInfo:
"""Holds record info information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'recordInfo')
if self.value:
top_level.text = self.value.strip()
return top_level
class RecordContentSource:
"""Holds content source information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'recordContentSource')
if self.value:
top_level.text = self.value.strip()
return top_level
class LanguageOfCataloging:
"""Holds language of cataloging information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'languageOfCataloging')
if self.value:
top_level.text = self.value.strip()
return top_level
class PhysicalDescription:
"""Holds physical description information"""
def __init__(self):
self.type = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'physicalDescription')
if self.type:
top_level.set('type', self.type.strip())
return top_level
class TypeOfResource:
"""Holds type of resource information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'typeOfResource')
top_level.text = self.value.strip()
return top_level
class Form:
"""Holds form information"""
def __init__(self):
self.authority = ''
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'form')
if self.authority:
top_level.set('authority', self.authority.strip())
top_level.text = self.value.strip()
return top_level
class InternetMediaType:
"""Holds internet media type information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'internetMediaType')
top_level.text = self.value.strip()
return top_level
class DigitalOrigin:
"""Holds digital origin information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'digitalOrigin')
top_level.text = self.value.strip()
return top_level
class Extent:
"""Holds extent information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'extent')
top_level.text = self.value.strip()
return top_level
class Abstract:
"""Holds abstract information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'abstract')
top_level.text = self.value.strip()
return top_level
class Publisher:
"""Holds publisher information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'publisher')
top_level.text = self.value.strip()
return top_level
class Subject:
"""Holds subject information"""
def __init__(self):
self.type = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'subject')
if self.type:
top_level.set('type', self.type.strip())
return top_level
class Topic:
"""Holds topic information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'topic')
top_level.text = self.value.strip()
return top_level
class Geographic:
"""Holds geographic information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'geographic')
top_level.text = self.value.strip()
return top_level
class Genre:
"""Holds genre information"""
def __init__(self):
self.authority = ''
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'genre')
if self.authority:
top_level.set('authority', self.authority.strip())
top_level.text = self.value.strip()
return top_level
class Note:
"""Holds note information"""
def __init__(self):
self.value = ''
self.type = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'note')
if self.type:
top_level.set('type', self.type.strip())
top_level.text = self.value.strip()
return top_level
class Role:
"""Holds role information"""
def __init__(self):
self.type = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'role')
if self.type:
top_level.set('type', self.type.strip())
return top_level
class Language:
"""Holds language information"""
def __init__(self):
self.type = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'language')
if self.type:
top_level.set('type', self.type.strip())
return top_level
class LanguageTerm:
"""Holds language term information"""
def __init__(self):
self.type = ''
self.value = ''
self.authority = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'languageTerm')
if self.type:
top_level.set('type', self.type.strip())
if self.authority:
top_level.set('authority', self.authority.strip())
top_level.text = self.value.strip()
return top_level
class RoleTerm:
"""Holds role term information"""
def __init__(self):
self.value = ''
self.authority = ''
self.type = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'roleTerm')
if self.type:
top_level.set('type', self.type.strip())
if self.authority:
top_level.set('authority', self.authority.strip())
top_level.text = self.value.strip()
return top_level
class Name:
"""Holds name information"""
def __init__(self):
self.type = ''
self.authority = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'name')
if self.type:
top_level.set('type', self.type.strip())
if self.authority:
top_level.set('authority', self.authority.strip())
return top_level
class NamePart:
"""Holds name part information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'namePart')
top_level.text = self.value.strip()
return top_level
class DateCreated:
"""Holds date created information"""
def __init__(self):
self.value = ''
self.encoding = ''
self.qualifier = ''
self.keyDate = ''
self.point = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'dateCreated')
if self.encoding:
top_level.set('encoding', self.encoding.strip())
if self.qualifier:
top_level.set('qualifier', self.qualifier.strip())
if self.keyDate:
top_level.set('keyDate', self.keyDate.strip())
if self.point:
top_level.set('point', self.point.strip())
top_level.text = self.value.strip()
return top_level
class DateIssued:
"""Holds date issued information"""
def __init__(self):
self.value = ''
self.encoding = ''
self.qualifier = ''
self.keyDate = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'dateIssued')
if self.encoding:
top_level.set('encoding', self.encoding.strip())
if self.qualifier:
top_level.set('qualifier', self.qualifier.strip())
if self.keyDate:
top_level.set('keyDate', self.keyDate.strip())
top_level.text = self.value.strip()
return top_level
class OriginInfo:
"""Holds origin info information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'originInfo')
return top_level
class Place:
"""Holds place information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'place')
return top_level
class PlaceTerm:
"""Holds place term information"""
def __init__(self):
self.type = ''
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'placeTerm')
if self.type:
top_level.set('type', self.type.strip())
top_level.text = self.value.strip()
return top_level
class RelatedItem:
"""Holds Releated Item information"""
def __init__(self):
self.type = ''
self.value = ''
self.display_label = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'relatedItem')
if self.type:
top_level.set('type', self.type.strip())
if self.display_label:
top_level.set('displayLabel', self.display_label.strip())
return top_level
class TitleInfo:
"""Holds identifier information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'titleInfo')
return top_level
class Title:
"""Holds title information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'title')
top_level.text = self.value.strip()
return top_level
class Location:
"""Holds identifier information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'location')
return top_level
class PhysicalLocation:
"""Holds physical location information"""
def __init__(self):
self.value = ''
self.type = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'physicalLocation')
if self.type:
top_level.set('type', self.type.strip())
top_level.text = self.value.strip()
return top_level
class ShelfLocator:
"""Holds shelf locator information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'shelfLocator')
top_level.text = self.value.strip()
return top_level
class HoldingSimple:
"""Holdings information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'holdingSimple')
top_level.text = self.value.strip()
return top_level
class CopyInformation:
"""Holdings information"""
def __init__(self):
self.value = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'copyInformation')
top_level.text = self.value.strip()
return top_level
class EnumerationAndChronology:
def __init__(self):
self.value = ''
self.unit_type = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'enumerationAndChronology')
top_level.text = self.value.strip()
if self.unit_type:
top_level.set('unitType', str(self.unit_type).strip())
return top_level
class AccessCondition:
"""Holds access condition information"""
def __init__(self):
self.value = ''
self.type = ''
self.display_label = ''
self.xlink = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'accessCondition')
if self.type:
top_level.set('type', self.type.strip())
if self.display_label:
top_level.set('displayLabel', self.display_label)
if self.xlink:
top_level.set('xlink:href', self.xlink)
top_level.text = self.value.strip()
return top_level
class Identifier:
"""Holds identifier information"""
def __init__(self):
self.type = ''
self.value = ''
self.display_label = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'identifier')
if self.type:
top_level.set('type', self.type.strip())
if self.display_label:
top_level.set('displayLabel', self.display_label)
top_level.text = self.value.strip()
return top_level
class Url:
"""Holds url information"""
def __init__(self):
self.value = ''
self.display_label = ''
def to_mods_element(self, parent_element):
top_level = ET.SubElement(parent_element, 'url')
if self.display_label:
top_level.set('displayLabel', self.display_label)
top_level.text = self.value.strip()
return top_level
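# Hedged usage sketch (not part of the original module): build a tiny MODS
# fragment with a title and an identifier, then print the serialized XML.
if __name__ == '__main__':
    mods_root = ET.Element('mods')

    title_info = TitleInfo().to_mods_element(mods_root)
    title = Title()
    title.value = 'Example record'
    title.to_mods_element(title_info)

    identifier = Identifier()
    identifier.type = 'local'
    identifier.value = 'example-001'
    identifier.to_mods_element(mods_root)

    print(ET.tostring(mods_root, encoding='unicode'))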
|
StarcoderdataPython
|
44368
|
<filename>tests/test_gf.py
from crypto_math import GF, field_extension
import pytest
@pytest.fixture
def setup():
F7 = GF(7)
F7_4 = field_extension(F7, 4)
return F7_4
@pytest.mark.parametrize(
"x,y,expect",
[
([1, 1, 1, 1], [2, 3, 1], [1, 3, 4, 2]),
([2, 3, 1], [6, 1, 3, 1], [6, 3, 6, 2]),
([1, 1, 1, 1], [6, 1, 3, 1], [2, 4, 2]),
],
)
def test_add(setup, x, y, expect):
F7_4 = setup
x = F7_4(x)
y = F7_4(y)
assert x + y == F7_4(expect)
@pytest.mark.parametrize(
"x,y,expect",
[
([1, 1, 1, 1], [2, 3, 1], [1, 6, 5, 0]),
([2, 3, 1], [6, 1, 3, 1], [1, 1, 0, 0]),
([1, 1, 1, 1], [6, 1, 3, 1], [2, 0, 5, 0]),
([1, 1, 1, 1], [1, 1, 1, 1], [0]),
],
)
def test_sub(setup, x, y, expect):
F7_4 = setup
x = F7_4(x)
y = F7_4(y)
assert x - y == F7_4(expect)
@pytest.mark.parametrize(
"x,y,expect",
[
([1, 1, 0], [3, 2], [3, 5, 2, 0]),
([1, 1], [1, 3, 6], [1, 4, 2, 6]),
([1, 1, 1, 1], [2, 3, 1], [3, 1, 6, 0]),
([2, 3, 1], [6, 1, 3, 1], [4, 4, 2, 4]),
([1, 1, 1, 1], [6, 1, 3, 1], [1, 3, 0, 5]),
([1, 1, 1, 1], [0], [0]),
],
)
def test_mul(setup, x, y, expect):
F7_4 = setup
x = F7_4(x)
y = F7_4(y)
assert x * y == F7_4(expect)
@pytest.mark.parametrize(
"x,y,expect",
[
([1, 1, 1, 1], [2, 3, 1], [2, 6, 4, 6]),
([2, 3, 1], [6, 1, 3, 1], [0, 6, 5, 4]),
([1, 1, 1, 1], [6, 1, 3, 1], [2, 5, 1, 5]),
],
)
def test_division(setup, x, y, expect):
F7_4 = setup
x = F7_4(x)
y = F7_4(y)
assert x / y == F7_4(expect)
@pytest.mark.parametrize(
"x",
[
([1, 1, 1, 1]),
([2, 3, 1]),
([6, 1, 3, 1]),
],
)
def test_inverse(setup, x):
F7_4 = setup
x = F7_4(x)
assert x * x.inverse() == F7_4([1])
@pytest.mark.parametrize(
"x,e",
[
([1, 1, 1, 1], 3),
([2, 3, 1], 3),
([6, 1, 3, 1], 3),
([6, 1, 3, 1], 0),
([6, 1, 3, 1], 1),
],
)
def test_pow(setup, x, e):
F7_4 = setup
x = F7_4(x)
a = F7_4.one()
for _ in range(e):
a *= x
assert x ** e == a
|
StarcoderdataPython
|
167913
|
from django.shortcuts import render, get_object_or_404
from rest_framework import generics, permissions, status
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import RestaurantSerializer, RestaurantnamesSerializer, UserCollectionsSerializer, RestaurantCollectionsSerializer
from .models import Restaurant, Restaurant_names, UserCollections, RestaurantCollections
from rest_framework.decorators import action
from datetime import datetime
from django.contrib.auth.models import User
from .pagination import UserCollectionPagination
# Create your views here.
class RestaurantnamesListView(generics.ListCreateAPIView):
permission_classes = [
permissions.IsAuthenticated,
]
serializer_class = RestaurantSerializer
def get_queryset(self):
name = self.kwargs.get('name')
queryset = Restaurant.objects.filter(restaurant__name__icontains=self.kwargs.get('name'))
return queryset
def get(self, request, *args, **kwargs):
name = self.kwargs.get('name', None)
if name is None:
return Response({'error': 'No query name provided.'}, status=status.HTTP_400_BAD_REQUEST)
return super().get(request, *args, **kwargs)
class RestaurantnamesDetailView(generics.RetrieveDestroyAPIView):
permission_classes = [
permissions.IsAuthenticated,
]
queryset = Restaurant_names.objects.all()
serializer_class = RestaurantnamesSerializer
class RestaurantListView(generics.ListCreateAPIView):
permission_classes = [
permissions.IsAuthenticated,
]
queryset = Restaurant.objects.all()
serializer_class = RestaurantSerializer
class RestaurantDetailView(generics.RetrieveDestroyAPIView):
permission_classes = [
permissions.IsAuthenticated,
]
queryset = Restaurant.objects.all()
serializer_class = RestaurantSerializer
class RestaurentFilterView(generics.ListCreateAPIView):
permission_classes = [
permissions.IsAuthenticated,
]
serializer_class = RestaurantSerializer
def get_queryset(self):
day = self.kwargs.get('day').lower()
query_time = self.kwargs.get('query_time')
query_name = self.kwargs.get('query_name', None)
if day == 'sunday':
queryset = Restaurant.objects.filter(opening_time__sunday__lte=query_time, closing_time__sunday__gt=query_time)
if day == 'monday':
queryset = Restaurant.objects.filter(opening_time__monday__lte=query_time, closing_time__monday__gt=query_time)
if day == 'tuesday':
queryset = Restaurant.objects.filter(opening_time__tuesday__lte=query_time, closing_time__tuesday__gt=query_time)
if day == 'wednesday':
queryset = Restaurant.objects.filter(opening_time__wednesday__lte=query_time, closing_time__wednesday__gt=query_time)
if day == 'thursday':
queryset = Restaurant.objects.filter(opening_time__thursday__lte=query_time, closing_time__thursday__gt=query_time)
if day == 'friday':
queryset = Restaurant.objects.filter(opening_time__friday__lte=query_time, closing_time__friday__gt=query_time)
if day == 'saturday':
queryset = Restaurant.objects.filter(opening_time__saturday__lte=query_time, closing_time__saturday__gt=query_time)
if query_name is not None:
queryset = queryset.filter(restaurant__name__icontains=query_name)
return queryset
def get(self, request, *args, **kwargs):
valid_days = ['monday','tuesday','wednesday','thursday','friday','saturday','sunday']
day = self.kwargs.get('day', None)
if day is None:
return Response({'error': 'Please provide a day.'}, status=status.HTTP_400_BAD_REQUEST)
day = day.lower()
if day not in valid_days:
return Response({'error': 'Wrong day format. Please use one of "monday", "tuesday", "wednesday", "thursday", "friday", "saturday" or "sunday"'}, status=status.HTTP_400_BAD_REQUEST)
query_time = self.kwargs.get('query_time', None)
try:
datetime.strptime(query_time,'%H:%M:%S')
except ValueError:
return Response({'error': 'Wrong time format. Please use valid 24hr format'}, status=status.HTTP_400_BAD_REQUEST)
return super().get(request, *args, **kwargs)
class UserCollectionsCreateView(generics.ListCreateAPIView):
permission_classes = [
permissions.IsAuthenticated,
]
serializer_class = UserCollectionsSerializer
pagination_class = UserCollectionPagination
def get_queryset(self):
user_id = self.kwargs.get('user_id')
queryset = UserCollections.objects.filter(collaborators__id=user_id)
return queryset
def get(self, request, *args, **kwargs):
user_id = self.kwargs.get('user_id', None)
if user_id is None:
return Response({'error': 'Please provide a user id.'}, status=status.HTTP_400_BAD_REQUEST)
return super().get(request, *args, **kwargs)
class RestaurantCollectionsCreateView(generics.ListCreateAPIView):
permission_classes = [
permissions.IsAuthenticated,
]
serializer_class = RestaurantCollectionsSerializer
pagination_class = UserCollectionPagination
def get_queryset(self):
user_id = self.kwargs.get('user_id')
collection_name = self.kwargs.get('collection_name')
queryset = RestaurantCollections.objects.filter(restaurant_collection__collaborators__id=user_id, restaurant_collection__name=collection_name)
return queryset
def get(self, request, *args, **kwargs):
user_id = self.kwargs.get('user_id', None)
collection_name = self.kwargs.get('collection_name', None)
if user_id is None:
return Response({'error': 'Please provide a user id.'}, status=status.HTTP_400_BAD_REQUEST)
if collection_name is None:
return Response({'error': 'Please provide a collection name.'}, status=status.HTTP_400_BAD_REQUEST)
return super().get(request, *args, **kwargs)
class RestaurantCollectionsListView(generics.ListCreateAPIView):
permission_classes = [
permissions.IsAuthenticated,
]
serializer_class = RestaurantCollectionsSerializer
pagination_class = UserCollectionPagination
def get_queryset(self):
user_id = self.kwargs.get('user_id')
restaurant_id = self.kwargs.get('restaurant_id')
queryset = RestaurantCollections.objects.filter(restaurant_collection__collaborators__id=user_id, restaurant__id=restaurant_id)
return queryset
def get(self, request, *args, **kwargs):
user_id = self.kwargs.get('user_id', None)
restaurant_id = self.kwargs.get('restaurant_id', None)
if user_id is None:
return Response({'error': 'Please provide a user id.'}, status=status.HTTP_400_BAD_REQUEST)
if restaurant_id is None:
return Response({'error': 'Please provide a restaurant id.'}, status=status.HTTP_400_BAD_REQUEST)
return super().get(request, *args, **kwargs)
class RestaurantCollectionsDestroyView(generics.RetrieveDestroyAPIView):
permission_classes = [
permissions.IsAuthenticated,
]
serializer_class = RestaurantCollectionsSerializer
pagination_class = UserCollectionPagination
def get_queryset(self):
user_id = self.kwargs.get('user_id')
collection_name = self.kwargs.get('collection_name')
restaurant_id = self.kwargs.get('restaurant_id')
queryset = RestaurantCollections.objects.filter(restaurant_collection__collaborators__id=user_id, restaurant_collection__name=collection_name, restaurant__id=restaurant_id)
return queryset
def get_object(self):
queryset = self.get_queryset()
user_id = self.kwargs.get('user_id')
collection_name = self.kwargs.get('collection_name')
restaurant_id = self.kwargs.get('restaurant_id')
obj = get_object_or_404(queryset,restaurant_collection__collaborators__id=user_id, restaurant_collection__name=collection_name, restaurant__id=restaurant_id)
self.check_object_permissions(self.request, obj)
return obj
def delete(self, request, *args, **kwargs):
user_id = self.kwargs.get('user_id', None)
collection_name = self.kwargs.get('collection_name', None)
restaurant_id = self.kwargs.get('restaurant_id', None)
if user_id is None:
return Response({'error': 'Please provide a user id.'}, status=status.HTTP_400_BAD_REQUEST)
if restaurant_id is None:
return Response({'error': 'Please provide a restaurant id.'}, status=status.HTTP_400_BAD_REQUEST)
if collection_name is None:
return Response({'error': 'Please provide a collection name.'}, status=status.HTTP_400_BAD_REQUEST)
return super().delete(request, *args, **kwargs)
class UserCollectionsRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView):
permission_classes = [
permissions.IsAuthenticated,
]
serializer_class = UserCollectionsSerializer
pagination_class = UserCollectionPagination
queryset = UserCollections.objects.all()
def update(self, request, *args, **kwargs):
instance = self.get_object()
add_collaborator_email = request.data.get("add_collaborator_email", None)
remove_collaborator_email = request.data.get("remove_collaborator_email", None)
if add_collaborator_email is not None:
try:
user_object = User.objects.get(email=add_collaborator_email)
instance.collaborators.add(user_object)
            except User.DoesNotExist:
return Response({'error': 'No user with that email exists.'}, status=status.HTTP_400_BAD_REQUEST)
if remove_collaborator_email is not None:
try:
user_object = User.objects.get(email=remove_collaborator_email)
instance.collaborators.remove(user_object)
            except Exception:
return Response({'error': 'There was an error removing collaborator.'}, status=status.HTTP_400_BAD_REQUEST)
instance.save()
return super().update(request, *args, **kwargs)
|
StarcoderdataPython
|
3339007
|
<reponame>aronchick/gha-arm-experiment
# Import the needed management objects from the libraries. The azure.common library
# is installed automatically with the other libraries.
from azure.common.client_factory import get_client_from_cli_profile
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.compute import ComputeManagementClient
import uuid
import secrets
import string
import sys
from pathlib import Path
alphabet = string.ascii_letters + string.digits + string.punctuation
password = "".join(
secrets.choice(alphabet) for i in range(60)
)  # for a 60-character password
print(f"Provisioning a virtual machine...some operations might take a minute or two.")
# Step 1: Provision a resource group
# Obtain the management object for resources, using the credentials from the CLI login.
resource_client = get_client_from_cli_profile(ResourceManagementClient)
# Constants we need in multiple places: the resource group name and the region
# in which we provision resources. You can change these values however you want.
# THIS NEEDS TO BE PROVISIONED AHEAD OF TIME AND THIS ACCOUNT NEEDS TO HAVE CREATE PERMISSIONS
RESOURCE_GROUP_NAME = "ci_sample_rg"
LOCATION = "centralus"
unique_string = uuid.uuid4().hex
ci_tags = {"type": "ci_resources", "run_id": unique_string}
rg_result = resource_client.resource_groups.get(resource_group_name=RESOURCE_GROUP_NAME)
print(f"Got {rg_result.name} in the {rg_result.location} region")
# For details on the previous code, see Example: Provision a resource group
# at https://docs.microsoft.com/azure/developer/python/azure-sdk-example-resource-group
# Step 2: provision a virtual network
# A virtual machine requires a network interface client (NIC). A NIC requires
# a virtual network and subnet along with an IP address. Therefore we must provision
# these downstream components first, then provision the NIC, after which we
# can provision the VM.
# Network and IP address names
VNET_NAME = f"python-example-vnet-{unique_string}"
SUBNET_NAME = f"python-example-subnet-{unique_string}"
IP_NAME = f"python-example-ip-{unique_string}"
IP_CONFIG_NAME = f"python-example-ip-config-{unique_string}"
NIC_NAME = f"python-example-nic-{unique_string}"
NETWORK_SECURITY_GROUP_NAME = f"python-example-nsg-{unique_string}"
# Obtain the management object for networks
network_client = get_client_from_cli_profile(NetworkManagementClient)
# Provision the virtual network and wait for completion
poller = network_client.virtual_networks.begin_create_or_update(
RESOURCE_GROUP_NAME,
VNET_NAME,
{
"location": LOCATION,
"address_space": {"address_prefixes": ["10.0.0.0/16"]},
"tags": ci_tags,
},
)
vnet_result = poller.result()
print(
f"Provisioned virtual network {vnet_result.name} with address prefixes {vnet_result.address_space.address_prefixes}"
)
# Step 3: Provision the subnet and wait for completion
poller = network_client.subnets.begin_create_or_update(
RESOURCE_GROUP_NAME,
VNET_NAME,
SUBNET_NAME,
{"address_prefix": "10.0.0.0/24", "tags": ci_tags},
)
subnet_result = poller.result()
print(
f"Provisioned virtual subnet {subnet_result.name} with address prefix {subnet_result.address_prefix}"
)
# Step 4: Provision an IP address and wait for completion
poller = network_client.public_ip_addresses.begin_create_or_update(
RESOURCE_GROUP_NAME,
IP_NAME,
{
"location": LOCATION,
"sku": {"name": "Standard"},
"public_ip_allocation_method": "Static",
"public_ip_address_version": "IPV4",
"tags": ci_tags,
},
)
ip_address_result = poller.result()
print(
f"Provisioned public IP address {ip_address_result.name} with address {ip_address_result.ip_address}"
)
# Step 5: Provision the network interface client
poller = network_client.network_security_groups.begin_create_or_update(
RESOURCE_GROUP_NAME,
NETWORK_SECURITY_GROUP_NAME,
{
"location": LOCATION,
"properties": {
"securityRules": [
{
"name": "ssh_allow",
"properties": {
"protocol": "*",
"sourceAddressPrefix": "*",
"destinationAddressPrefix": "*",
"access": "Allow",
"destinationPortRange": "22",
"sourcePortRange": "*",
"priority": 130,
"direction": "Inbound",
},
}
]
},
"tags": ci_tags,
},
)
nsg_result = poller.result()
print(f"Provisioned network security group {nsg_result.name}")
poller = network_client.network_interfaces.begin_create_or_update(
RESOURCE_GROUP_NAME,
NIC_NAME,
{
"location": LOCATION,
"ip_configurations": [
{
"name": IP_CONFIG_NAME,
"subnet": {"id": subnet_result.id},
"public_ip_address": {"id": ip_address_result.id},
}
],
"network_security_group": {"id": f"{nsg_result.id}"},
"tags": ci_tags,
},
)
nic_result = poller.result()
print(f"Provisioned network interface client {nic_result.name}")
# Step 6: Provision the virtual machine
# Obtain the management object for virtual machines
compute_client = get_client_from_cli_profile(ComputeManagementClient)
VM_NAME = f"CI-VM-{unique_string}"
USERNAME = f"{uuid.uuid4().hex}"
PASSWORD = f"{password}"
print(
f"Provisioning virtual machine {VM_NAME}; this operation might take a few minutes."
)
# Provision the VM specifying only minimal arguments, which defaults to an Ubuntu 18.04 VM
# on a Standard DS1 v2 plan with a public IP address and a default virtual network/subnet.
ssh_pub_key = Path("/tmp/sshkey.pub").read_text().rstrip()
print(f"SSH Key: {ssh_pub_key}")
pub_path = f"/home/{USERNAME}/.ssh/authorized_keys"
print(f"Pub_path: {pub_path}")
poller = compute_client.virtual_machines.begin_create_or_update(
RESOURCE_GROUP_NAME,
VM_NAME,
{
"location": LOCATION,
"storage_profile": {
"image_reference": {
"publisher": "Canonical",
"offer": "UbuntuServer",
"sku": "16.04.0-LTS",
"version": "latest"
}
},
"hardware_profile": {"vm_size": "Standard_DS1_v2"},
"osProfile": {
"adminUsername": f"{USERNAME}",
"computerName": f"{VM_NAME}",
"linuxConfiguration": {
"ssh": {
"publicKeys": [
{
"path": f"{pub_path}",
"keyData": f"{ssh_pub_key}",
}
]
},
"disablePasswordAuthentication": True,
},
},
"network_profile": {"network_interfaces": [{"id": nic_result.id,}]},
"tags": ci_tags,
},
)
vm_result = poller.result()
print(f"Provisioned virtual machine {vm_result.name}")
print(f"::set-output name=VM_NAME::{VM_NAME}")
print(f"::set-output name=USERNAME::{USERNAME}")
print(f"::set-output name=IP_ADDRESS::{ip_address_result.ip_address}")
print(f"::set-output name=RUN_ID::{unique_string}")
print(f"SSH Command = \nssh -i /tmp/sshkey -o StrictHostKeyChecking=no {USERNAME}@{ip_address_result.ip_address} 'ls -lAR'")
|
StarcoderdataPython
|
86247
|
<gh_stars>10-100
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
def sketch(image):
# Convert image to gray scale
img_gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
# Clean up image using Gaussian Blur
img_gray_blur = cv.GaussianBlur(img_gray, (5, 5), 0)
# Extract Edges
canny_edges = cv.Canny(img_gray_blur, 30, 70)
    # Invert-binarize the edges so the sketch shows dark lines on a white background
ret, mask = cv.threshold(canny_edges, 120, 255, cv.THRESH_BINARY_INV)
return mask
def liveSketch():
cap = cv.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        if not ret:  # stop if a frame could not be read from the camera
            break
        cv.imshow("Live Sketch", sketch(frame))
        if cv.waitKey(1) == 27:  # exit when the Esc key is pressed
            break
cap.release()
cv.destroyAllWindows()
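# A minimal sketch (not part of the original script) showing the same pipeline
# applied to a single image file instead of the webcam. The file names are
# placeholders; cv.imread returns None when the input path cannot be read.
def sketch_file(in_path="input.jpg", out_path="sketch.png"):
    image = cv.imread(in_path)
    if image is None:
        raise FileNotFoundError(f"Could not read image: {in_path}")
    cv.imwrite(out_path, sketch(image))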
if __name__ == "__main__":
liveSketch()
|
StarcoderdataPython
|
1649946
|
import unittest
from .. import main
def test_entry_point():
assert main() == "Here's the entry point"
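# Note (not part of the original file): because of the relative import
# "from .. import main", this test is meant to be collected by pytest from the
# package root, for example "python -m pytest path/to/package". The exact
# package layout here is an assumption.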
|
StarcoderdataPython
|
3320324
|
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class Testixor(TestCase):
def generate_data(self, min_d, max_d, shape, dtype):
input1 = np.random.uniform(min_d, max_d, shape).astype(dtype)
input2 = np.random.uniform(min_d, max_d, shape).astype(dtype)
#modify from numpy.ndarray to torch.tensor
npu_input1 = torch.from_numpy(input1)
npu_input2 = torch.from_numpy(input2)
return npu_input1, npu_input2
def generate_bool_data(self, min_d, max_d, shape):
input1 = np.random.uniform(min_d, max_d, shape)
input2 = np.random.uniform(min_d, max_d, shape)
input1 = input1.reshape(-1)
input2 = input2.reshape(-1)
        for i in range(len(input1)):
            if input1[i] < 0.5:
                input1[i] = 0
        for i in range(len(input2)):
            if input2[i] < 0.5:
                input2[i] = 0
input1 = input1.astype(np.bool)
input2 = input2.astype(np.bool)
input1 = input1.reshape(shape)
input2 = input2.reshape(shape)
#modify from numpy.ndarray to torch.tensor
npu_input1 = torch.from_numpy(input1)
npu_input2 = torch.from_numpy(input2)
return npu_input1, npu_input2
def generate_single_data(self, min_d, max_d, shape, dtype):
input1 = np.random.uniform(min_d, max_d, shape).astype(dtype)
npu_input1 = torch.from_numpy(input1)
return npu_input1
def generate_single_bool_data(self, min_d, max_d, shape):
input1 = np.random.uniform(min_d, max_d, shape)
input1 = input1.reshape(-1)
for i in range(len(input1)):
if input1[i] < 0.5:
input1[i] = 0
input1 = input1.astype(np.bool)
input1 = input1.reshape(shape)
npu_input1 = torch.from_numpy(input1)
return npu_input1
def generate_three_data(self, min_d, max_d, shape, dtype):
input1 = np.random.uniform(min_d, max_d, shape).astype(dtype)
input2 = np.random.uniform(min_d, max_d, shape).astype(dtype)
input3 = np.random.uniform(min_d, max_d, shape).astype(dtype)
npu_input1 = torch.from_numpy(input1)
npu_input2 = torch.from_numpy(input2)
npu_input3 = torch.from_numpy(input3)
return npu_input1, npu_input2, npu_input3
def npu_op_exec_out(self, input1, input2, input3):
input1 = input1.to("npu")
input2 = input2.to("npu")
output = input3.to("npu")
input1.__ixor__(input2, out=output)
output = output.to("cpu")
output = output.numpy()
return output
def cpu_op_exec_out(self, input1, input2, input3):
output = input3
input1.__ixor__(input2, out=output)
output = output.numpy()
return output
def npu_op_exec_scalar_out(self, input1, input2, input3):
output = input3.to("npu")
input1 = input1.to("npu")
input2 = torch.tensor(input2)
input2 = input2.to("npu")
input1.__ixor__(input2, out=output)
output = output.to("cpu")
output = output.numpy()
return output
def test__ixor__int32(self, device):
npu_input1, npu_input2 = self.generate_data(0, 100, (2,3), np.int32)
        cpu_output = self.cpu_op_exec_out(npu_input1, npu_input2, npu_input1)
        npu_output = self.npu_op_exec_out(npu_input1, npu_input2, npu_input1)
self.assertRtolEqual(cpu_output, npu_output)
def test__ixor__int32_scalar(self, device):
npu_input1, npu_input2 = self.generate_data(0, 100, (2,3), np.int32)
cpu_output = self.cpu_op_exec_out(npu_input1, 1, npu_input1)
npu_output = self.npu_op_exec_scalar_out(npu_input1, 1, npu_input1)
self.assertRtolEqual(cpu_output, npu_output)
def test__ixor__float32_out(self, device):
npu_input1, npu_input2, npu_input3 = self.generate_three_data(0, 100, (4, 3), np.int32)
cpu_output = self.cpu_op_exec_out(npu_input1, npu_input2, npu_input3)
npu_output = self.npu_op_exec_out(npu_input1, npu_input2, npu_input3)
self.assertRtolEqual(cpu_output, npu_output)
instantiate_device_type_tests(Testixor, globals(), except_for='cpu')
if __name__ == '__main__':
run_tests()
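# A small CPU-only reference (not part of the original NPU test suite) showing
# the behaviour the tests above compare against: __ixor__ is in-place bitwise
# xor on integer/bool tensors. The "out=" keyword used above is assumed to come
# from the NPU-adapted build; stock PyTorch spells the same operation as
# "a ^= b" or "a.bitwise_xor_(b)".
#
#   import torch
#   a = torch.tensor([0b1100, 0b1010], dtype=torch.int32)
#   b = torch.tensor([0b1010, 0b0110], dtype=torch.int32)
#   a ^= b  # in-place xor; a is now tensor([6, 12], dtype=torch.int32)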
|
StarcoderdataPython
|
169517
|
from helpers import render
def aboutus(request):
return render(request, {}, 'news/aboutus.html')
def help(request):
return render(request, {}, 'news/help.html')
def buttons(request):
return render(request, {}, 'news/buttons.html')
|
StarcoderdataPython
|
191771
|
<gh_stars>0
from . import exportFix, get_files
import argparse
import os
def check_file(file):
if not os.access(file, os.W_OK):
parser.error('File could not be accessed. Make sure file exists and can be modified')
else:
return file
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Update mod exported into BeamNG Drive from Automation for turbo overheating bug. Can be done either automatically on a set number of most recent files or on a specified file path')
parser.add_argument('-f', dest='filepath', help='Filepath to operate on', metavar='FILE', type=check_file)
parser.add_argument('-a', dest='auto_count', help='Automatically operate on latest files in BeamNG mods folder', metavar='N', type=int)
args = parser.parse_args()
if not(args.filepath or args.auto_count):
parser.error('Must select at least one option')
if args.filepath:
exportFix.fix_file(args.filepath)
if args.auto_count:
for file in get_files.get_files_sorted()[:args.auto_count]:
exportFix.fix_file(file)
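# Example invocations (not part of the original file; the module name below is a
# placeholder -- run it however this package exposes its entry point):
#   python -m <package> -f "C:\path\to\export.zip"   # fix one specific export
#   python -m <package> -a 3                         # fix the 3 newest exports
# check_file() can call parser.error() because argparse only invokes the type=
# callback inside parse_args(), after "parser" has been bound above.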
|
StarcoderdataPython
|
3354788
|
<filename>FWCore/Integration/test/testConcurrentIOVsESConcurrentSource_cfg.py
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.source = cms.Source("EmptySource",
firstRun = cms.untracked.uint32(1),
firstLuminosityBlock = cms.untracked.uint32(1),
firstEvent = cms.untracked.uint32(1),
numberEventsInLuminosityBlock = cms.untracked.uint32(1),
numberEventsInRun = cms.untracked.uint32(100)
)
process.maxEvents.input = 8
process.options = dict(
numberOfThreads = 4,
numberOfStreams = 4,
numberOfConcurrentRuns = 1,
numberOfConcurrentLuminosityBlocks = 4,
eventSetup = dict(
numberOfConcurrentIOVs = 2
)
)
process.testESSource = cms.ESSource("TestESConcurrentSource",
firstValidLumis = cms.vuint32(1, 4, 6, 7, 8, 9),
iterations = cms.uint32(10*1000*1000),
checkIOVInitialization = cms.bool(True),
expectedNumberOfConcurrentIOVs = cms.uint32(2)
)
process.concurrentIOVESProducer = cms.ESProducer("ConcurrentIOVESProducer")
process.test = cms.EDAnalyzer("ConcurrentIOVAnalyzer",
checkExpectedValues = cms.untracked.bool(False)
)
process.testOther = cms.EDAnalyzer("ConcurrentIOVAnalyzer",
checkExpectedValues = cms.untracked.bool(False),
fromSource = cms.untracked.ESInputTag(":other")
)
process.busy1 = cms.EDProducer("BusyWaitIntProducer",ivalue = cms.int32(1), iterations = cms.uint32(10*1000*1000))
process.p1 = cms.Path(process.busy1 * process.test * process.testOther)
#process.add_(cms.Service("Tracer"))
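# Typically executed inside a CMSSW environment with the cmsRun driver, e.g.:
#   cmsRun testConcurrentIOVsESConcurrentSource_cfg.py
# (assumes the TestESConcurrentSource / ConcurrentIOV test plugins are built)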
|
StarcoderdataPython
|
1759403
|
<filename>remote_sensing/python/drivers/00_Kirti_Mike_initial_plots_Grant/00_peak_tables_and_plots/d_peak_and_plots_for_Kirti_MikeBrady.py
"""
Peak and plot simultaneously
"""
import csv
import numpy as np
import pandas as pd
# import geopandas as gpd
from IPython.display import Image
# from shapely.geometry import Point, Polygon
from math import factorial
import datetime
import time
import scipy
import os, os.path
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from sklearn.linear_model import LinearRegression
from patsy import cr
# from pprint import pprint
import matplotlib.pyplot as plt
import seaborn as sb
import sys
start_time = time.time()
# search path for modules
# look @ https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
####################################################################################
###
### Core path
###
####################################################################################
sys.path.append('/Users/hn/Documents/00_GitHub/Ag/remote_sensing/python/')
####################################################################################
###
### Directories
###
####################################################################################
data_dir = "/Users/hn/Documents/01_research_data/Ag_check_point/" + \
"remote_sensing/01_NDVI_TS/Grant/No_EVI/Grant_10_cloud/Grant_2016/"
param_dir = "/Users/hn/Documents/00_GitHub/Ag/remote_sensing/parameters/"
####################################################################################
###
### Aeolus Core path
###
####################################################################################
sys.path.append('/home/hnoorazar/remote_sensing_codes/')
####################################################################################
###
### Aeolus Directories
###
####################################################################################
data_dir = "/data/hydro/users/Hossein/remote_sensing/" + \
"01_NDVI_TS/Grant/No_EVI/Grant_10_cloud/Grant_2016/"
param_dir = "/home/hnoorazar/remote_sensing_codes/parameters/"
####################################################################################
###
### Parameters
###
####################################################################################
freedom_df = 7
look_ahead = 8
freedom_df = int(sys.argv[1])
look_ahead = int(sys.argv[2])
double_crop_potential_plants = pd.read_csv(param_dir + "double_crop_potential_plants.csv")
double_crop_potential_plants.head(2)
####################################################################################
###
### Import remote cores
###
####################################################################################
import remote_sensing_core as rc
import remote_sensing_core as rcp
output_dir = data_dir
plot_dir_base = data_dir + "/plots/"
####################################################################################
###
### Data Reading
###
####################################################################################
file_names = ["Grant_2016_TS.csv"]
file_N = file_names[0]
a_df = pd.read_csv(data_dir + file_N)
####################################################################################
###
### process data
###
####################################################################################
#
# The following columns do not exist in the old data
#
if not('DataSrc' in a_df.columns):
print ("Data source is being set to NA")
a_df['DataSrc'] = "NA"
if not('CovrCrp' in a_df.columns):
print ("Data source is being set to NA")
a_df['CovrCrp'] = "NA"
a_df = rc.initial_clean_NDVI(a_df)
a_df.head(2)
an_EE_TS = a_df.copy()
# an_EE_TS = rc.initial_clean_NDVI(an_EE_TS)
################
###
### Just keep the potential fields
###
################
# filter the frame that is actually iterated below
an_EE_TS = an_EE_TS[an_EE_TS.CropTyp.isin(double_crop_potential_plants['Crop_Type'])]
### List of unique polygons
polygon_list = an_EE_TS['geo'].unique()
print(len(polygon_list))
output_columns = ['Acres', 'CovrCrp', 'CropGrp', 'CropTyp',
'DataSrc', 'ExctAcr', 'IntlSrD', 'Irrigtn', 'LstSrvD', 'Notes',
'RtCrpTy', 'Shap_Ar', 'Shp_Lng', 'TRS', 'county', 'year', 'geo',
'peak_Doy', 'peak_value', 'peak_count']
all_polygons_and_their_peaks = pd.DataFrame(data=None,
index=np.arange(3*len(an_EE_TS)),
columns=output_columns)
double_columns = ['Acres', 'CovrCrp', 'CropGrp', 'CropTyp',
'DataSrc', 'ExctAcr', 'IntlSrD', 'Irrigtn', 'LstSrvD', 'Notes',
'RtCrpTy', 'Shap_Ar', 'Shp_Lng', 'TRS', 'county', 'year', 'geo',
'peak_count']
double_polygons = pd.DataFrame(data=None,
index=np.arange(2*len(an_EE_TS)),
columns=double_columns)
pointer = 0
double_pointer = 0
counter = 0
for a_poly in polygon_list:
if (counter%1000 == 0):
print (counter)
counter += 1
curr_field = an_EE_TS[an_EE_TS['geo']==a_poly]
year = int(curr_field['year'].unique())
plant = curr_field['CropTyp'].unique()[0]
# Take care of names, replace "/" and "," and " " by "_"
plant = plant.replace("/", "_")
plant = plant.replace(",", "_")
plant = plant.replace(" ", "_")
plant = plant.replace("__", "_")
county = curr_field['county'].unique()[0]
TRS = curr_field['TRS'].unique()[0]
###
### There is a chance that a polygon is repeated twice?
###
X = curr_field['doy']
y = curr_field['NDVI']
#############################################
###
### Smoothen
###
#############################################
# Generate spline basis with "freedom_df" degrees of freedom
x_basis = cr(X, df=freedom_df, constraints='center')
# Fit model to the data
model = LinearRegression().fit(x_basis, y)
# Get estimates
y_hat = model.predict(x_basis)
#############################################
###
### find peaks
###
#############################################
# peaks_LWLS_1 = peakdetect(LWLS_1[:, 1], lookahead = 10, delta=0)
# max_peaks = peaks_LWLS_1[0]
# peaks_LWLS_1 = form_xs_ys_from_peakdetect(max_peak_list = max_peaks, doy_vect=X)
peaks_spline = rc.peakdetect(y_hat, lookahead = look_ahead, delta=0)
max_peaks = peaks_spline[0]
peaks_spline = rc.form_xs_ys_from_peakdetect(max_peak_list = max_peaks, doy_vect=X)
# print(peaks_spline)
DoYs_series = pd.Series(peaks_spline[0])
peaks_series = pd.Series(peaks_spline[1])
peak_df = pd.DataFrame({
'peak_Doy': DoYs_series,
'peak_value': peaks_series
})
# add number of peaks to the data frame.
peak_df['peak_count'] = peak_df.shape[0]
WSDA_df = rc.keep_WSDA_columns(curr_field)
WSDA_df = WSDA_df.drop_duplicates()
if (len(peak_df)>0):
WSDA_df = pd.concat([WSDA_df]*peak_df.shape[0]).reset_index()
# WSDA_df = pd.concat([WSDA_df, peak_df], axis=1, ignore_index=True)
WSDA_df = WSDA_df.join(peak_df)
if ("index" in WSDA_df.columns):
WSDA_df = WSDA_df.drop(columns=['index'])
# all_polygons_and_their_peaks = all_polygons_and_their_peaks.append(WSDA_df, sort=False)
"""
copy the .values. Otherwise the index inconsistency between
WSDA_df and all_poly... will prevent the copying.
"""
all_polygons_and_their_peaks.iloc[pointer:(pointer + len(WSDA_df))] = WSDA_df.values
#
# if we have double peaks add them to the double_polygons
#
if (len(WSDA_df) == 2):
# print(plant, county, year, counter)
WSDA_df = WSDA_df.drop(columns=['peak_Doy', 'peak_value'])
WSDA_df = WSDA_df.drop_duplicates()
double_polygons.iloc[double_pointer:(double_pointer + len(WSDA_df))] = WSDA_df.values
double_pointer += len(WSDA_df)
pointer += len(WSDA_df)
#############################################
###
### plot
###
#############################################
sub_out = "/plant_based_plots/LA_" + str(look_ahead) + "_df_" + str(freedom_df) + "/" + plant + "/"
plot_path = plot_dir_base + sub_out
plot_path = plot_path + str(peak_df.shape[0]) + "_peaks/"
os.makedirs(plot_path, exist_ok=True)
if (len(os.listdir(plot_path))<100):
plot_title = county + ", " + plant + ", " + str(year) + " (" + TRS + ")"
sb.set();
fig, ax = plt.subplots(figsize=(8,6));
ax.plot(X, y, label="NDVI");
ax.plot(X, y_hat, 'r', label="smoothing spline")
ax.scatter(DoYs_series, peaks_series, s=100, c='g', marker='*');
ax.set_title(plot_title);
ax.set(xlabel='DoY', ylabel='NDVI')
ax.legend(loc="best");
fig_name = plot_path + county + "_" + plant + "_" + str(year) + "_" + str(counter) + '.png'
plt.savefig(fname = fig_name, \
dpi=500,
bbox_inches='tight')
plt.close()
del(plot_path, sub_out, county, plant, year)
# print(plot_path)
# print(sub_out)
# print(county)
# print(plant)
# print(year)
"""
if peak_df.shape[0]==2:
double_plot_path = plot_dir_base + "/double_peaks/" + sub_out
os.makedirs(double_plot_path, exist_ok=True)
plot_title = county + ", " + plant + ", " + str(year) + " (" + TRS + ")"
sb.set();
fig, ax = plt.subplots(figsize=(8, 6));
ax.plot(X, y, label="NDVI data");
ax.plot(X, y_hat, 'r', label="smoothing spline result")
ax.scatter(DoYs_series, peaks_series, s=100, c='g', marker='*');
ax.set_title(plot_title);
ax.set(xlabel='DoY', ylabel='NDVI')
ax.legend(loc="best");
fig_name = double_plot_path + county + "_" + plant + "_" + str(year) + "_" + str(counter) + '.png'
plt.savefig(fname = fig_name, \
dpi=500,
bbox_inches='tight')
if (len(os.listdir(plot_path))<100):
fig_name = plot_path + county + "_" + plant + "_" + str(year) + "_" + str(counter) + '.png'
plt.savefig(fname = fig_name, \
dpi=500,
bbox_inches='tight')
plt.close()
del(plot_path, sub_out, county, plant, year)
else:
if (len(os.listdir(plot_path))<100):
plot_title = county + ", " + plant + ", " + str(year) + " (" + TRS + ")"
sb.set();
fig, ax = plt.subplots(figsize=(8,6));
ax.plot(X, y, label="NDVI data");
ax.plot(X, y_hat, 'r', label="smoothing spline result")
ax.scatter(DoYs_series, peaks_series, s=100, c='g', marker='*');
ax.set_title(plot_title);
ax.set(xlabel='DoY', ylabel='NDVI')
ax.legend(loc="best");
fig_name = plot_path + county + "_" + plant + "_" + str(year) + "_" + str(counter) + '.png'
plt.savefig(fname = fig_name, \
dpi=500,
bbox_inches='tight')
plt.close()
del(plot_path, sub_out, county, plant, year)
"""
# to make sure the reference by address thing
# will not cause any problem.
del(WSDA_df)
####################################################################################
###
### Compute double crop area
###
####################################################################################
####################################################################################
###
### Write the outputs
###
####################################################################################
all_polygons_and_their_peaks = all_polygons_and_their_peaks[0:pointer]
double_polygons = double_polygons[0:double_pointer]
out_name = output_dir + "LA_" + str(look_ahead) + "_df_"+ str(freedom_df) + "_all_polygons_and_their_peaks.csv"
all_polygons_and_their_peaks.to_csv(out_name, index = False)
out_name = output_dir + "LA_" + str(look_ahead) + "_df_"+ str(freedom_df) + "_double_polygons.csv"
double_polygons.to_csv(out_name, index = False)
end_time = time.time()
print(end_time - start_time)
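# --- Minimal synthetic demo (not part of the original driver; never called) ----
# Illustrates the smoothing/peak-finding idea used above on fabricated data: fit
# a natural cubic spline basis (patsy.cr) to a noisy double-peaked "NDVI" curve
# by ordinary least squares, then locate the peaks. scipy.signal.find_peaks is
# used here instead of the project's rc.peakdetect helper, which is only
# available inside the repository; df=7 mirrors the default freedom_df above.
def _demo_spline_peaks():
    from scipy.signal import find_peaks
    doy = np.arange(1, 366, 8)
    rng = np.random.RandomState(0)
    ndvi = (0.5 * np.exp(-((doy - 140) / 30.0) ** 2)    # first crop cycle
            + 0.4 * np.exp(-((doy - 250) / 25.0) ** 2)  # second crop cycle
            + 0.1 + rng.normal(0, 0.02, doy.size))      # baseline + noise
    basis = cr(doy, df=7, constraints='center')
    y_hat = LinearRegression().fit(basis, ndvi).predict(basis)
    idx, _ = find_peaks(y_hat, prominence=0.05)
    print("demo peak DoYs:", doy[idx], "peak NDVI:", np.round(y_hat[idx], 3))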
|
StarcoderdataPython
|
122792
|
<filename>exercises/ja/exc_02_02_01.py
from spacy.lang.ja import Japanese
nlp = Japanese()
doc = nlp("私はネコを飼っています")
# Look up the hash for the word "ネコ" (cat)
cat_hash = nlp.vocab.strings["ネコ"]
print(cat_hash)
# Use cat_hash to look up the string again
cat_string = nlp.vocab.strings[cat_hash]
print(cat_string)
|
StarcoderdataPython
|
1605354
|
#!/usr/bin/env python
from __future__ import absolute_import
import sys
import unittest
sys.path.append('xypath')
import xypath
import messytables
try:
import hamcrest
except ImportError:
hamcrest = None
import re
import tcore
class Test_Import_Missing(tcore.TMissing):
def test_table_has_properties_at_all(self):
self.table.sheet
class Test_Import(tcore.TCore):
def test_table_has_sheet_properties(self):
self.assertIn('xlrd', repr(self.table.sheet))
#import
def test_from_filename_with_table_name(self):
"""Can we specify only the filename and 'name' of the table?"""
if hamcrest is None:
raise unittest.SkipTest("Requires Hamcrest")
table = xypath.Table.from_filename(
self.wpp_filename,
table_name='NOTES')
self.assertEqual(32, len(table))
table.filter(
hamcrest.contains_string('(2) Including Zanzibar.')).assert_one()
#import
def test_from_filename_with_table_index(self):
"""Can we specify only the filename and index of the table?"""
new_table = xypath.Table.from_filename(self.wpp_filename,
table_index=5)
self.assertEqual(1, len(new_table.filter('(2) Including Zanzibar.')))
#import
def test_from_file_object_table_index(self):
with open(self.wpp_filename, 'rb') as f:
extension = tcore.get_extension(self.wpp_filename)
new_table = xypath.Table.from_file_object(
f, extension, table_index=5)
self.assertEqual(1, len(new_table.filter('(2) Including Zanzibar.')))
#import
def test_from_file_object_table_name(self):
with open(self.wpp_filename, 'rb') as f:
extension = tcore.get_extension(self.wpp_filename)
new_table = xypath.Table.from_file_object(
f, extension, table_name='NOTES')
self.assertEqual(1, len(new_table.filter('(2) Including Zanzibar.')))
#import
def test_from_file_object_no_table_specifier(self):
with open(self.wpp_filename, 'rb') as f:
extension = tcore.get_extension(self.wpp_filename)
self.assertRaises(
TypeError,
lambda: xypath.Table.from_file_object(f, extension))
#import
def test_from_file_object_ambiguous_table_specifier(self):
with open(self.wpp_filename, 'rb') as f:
extension = tcore.get_extension(self.wpp_filename)
self.assertRaises(
TypeError,
lambda: xypath.Table.from_file_object(
f, extension, table_name='NOTES', table_index=4))
#import
def test_from_messy(self):
new_table = xypath.Table.from_messy(self.messy.tables[0])
self.assertEqual(265, len(new_table.filter('Estimates')))
|
StarcoderdataPython
|