filename (string, lengths 13-19) | text (string, lengths 134-1.04M)
---|---|
the-stack_106_24944 | from __future__ import absolute_import
from __future__ import print_function
import os
import sys
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))))
import nngen as ng
import veriloggen
import matrix_sub
a_shape = (15, 15)
b_shape = (15, 15)
a_dtype = ng.int32
b_dtype = ng.int32
c_dtype = ng.int32
par = 1
axi_datawidth = 32
def test(request, silent=True):
veriloggen.reset()
simtype = request.config.getoption('--sim')
rslt = matrix_sub.run(a_shape, b_shape,
a_dtype, b_dtype, c_dtype,
par, axi_datawidth, silent,
filename=None, simtype=simtype,
outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
verify_rslt = rslt.splitlines()[-1]
assert(verify_rslt == '# verify: PASSED')
if __name__ == '__main__':
rslt = matrix_sub.run(a_shape, b_shape,
a_dtype, b_dtype, c_dtype,
par, axi_datawidth, silent=False,
filename='tmp.v',
outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
print(rslt)
|
the-stack_106_24947 | from XFE_v2 import Client, ip_command, url_command, cve_get_command, \
cve_search_command, file_command, whois_command
from CommonServerPython import outputPaths
DBOT_SCORE_KEY = 'DBotScore(val.Indicator == obj.Indicator && val.Vendor == obj.Vendor)'
MOCK_BASE_URL = 'https://www.this-is-a-fake-url.com'
MOCK_API_KEY = 'FAKE-API-KEY'
MOCK_PASSWORD = 'FAKE-PASSWORD'
MOCK_IP = '8.8.8.8'
MOCK_URL = 'https://www.google.com'
MOCK_CVE = 'CVE-2014-2601'
MOCK_HASH = '474B9CCF5AB9D72CA8A333889BBB34F0'
MOCK_HOST = 'google.com'
MOCK_CVE_QUERY = 'hello'
MOCK_IP_RESP = {
"ip": "8.8.8.8",
"history": [
{
"created": "2012-03-22T07:26:00.000Z",
"reason": "Regional Internet Registry",
"geo": {
"country": "United States",
"countrycode": "US"
},
"ip": "8.0.0.0/8",
"categoryDescriptions": {},
"reasonDescription": "One of the five RIRs announced a (new) location mapping of the IP.",
"score": 1,
"cats": {}
}],
"subnets": [
{
"created": "2018-04-24T06:22:00.000Z",
"reason": "Regional Internet Registry",
"reason_removed": True,
"asns": {
"3356": {
"removed": True,
"cidr": 8
}
},
"ip": "8.0.0.0",
"categoryDescriptions": {},
"reasonDescription": "One of the five RIRs announced a (new) location mapping of the IP.",
"score": 1,
"cats": {},
"subnet": "8.0.0.0/8"
}
],
"cats": {},
"geo": {
"country": "United States",
"countrycode": "US"
},
"DBotScore(val.Indicator == obj.Indicator && val.Vendor == obj.Vendor)": {
"Indicator": "8.8.8.8",
"Type": "ip",
"Vendor": "XFE",
"Score": 1,
"Reliability": "C - Fairly reliable"
},
"score": 1,
"reason": "Regional Internet Registry",
"reasonDescription": "One of the five RIRs announced a (new) location mapping of the IP.",
"categoryDescriptions": {},
"tags": []
}
MOCK_INVALID_IP_RESP = {
'ip': '8.8.8.8',
'history': [],
'subnets': [],
'cats': {},
'score': 1,
'tags': []
}
MOCK_URL_RESP = {
"result": {
"url": "https://www.google.com",
"cats": {
"Search Engines / Web Catalogs / Portals": True
},
"score": 1,
"categoryDescriptions": {
"Search Engines / Web Catalogs / Portals": "This category contains search engines."
}
},
"associated": [
{
"url": "google.com",
"cats": {
"Search Engines / Web Catalogs / Portals": True
},
"score": 1,
"categoryDescriptions": {
"Search Engines / Web Catalogs / Portals": "This category contains search engines,"
}
},
{
"url": "www.google.com",
"cats": {
"Search Engines / Web Catalogs / Portals": True
},
"score": 1,
"categoryDescriptions": {
"Search Engines / Web Catalogs / Portals": "This category contains search engines,"
}
}
],
"DBotScore(val.Indicator == obj.Indicator && val.Vendor == obj.Vendor)": {
"Indicator": "https://www.google.com",
"Type": "url",
"Vendor": "XFE",
"Score": 1,
"Reliability": "C - Fairly reliable"
},
"tags": []
}
MOCK_CVE_RESP = [
{
"type": "vulnerability",
"xfdbid": 92744,
"updateid": 0,
"variant": "single",
"title": "HP Integrated Lights-Out 2 Heartbleed denial of service",
"description": "HP Integrated Lights-Out 2 (iLO 2) is vulnerable to a denial of service,",
"risk_level": 7.8,
"cvss": {
"version": "2.0",
"authentication": "None",
"access_vector": "Network",
"access_complexity": "Low",
"confidentiality_impact": "None",
"integrity_impact": "None",
"availability_impact": "Complete",
"remediation_level": "Official Fix"
},
"temporal_score": 5.8,
"remedy": "Refer to HPSBHF03006 for patch, upgrade or suggested workaround information. See References.",
"remedy_fmt": "<P>Refer to HPSBHF03006 for patch, upgrade or suggested workaround information.</P>",
"reported": "2014-04-24T00:00:00Z",
"tagname": "hp-ilo-cve20142601-dos",
"stdcode": [
"BID-67054",
"SA58224",
"CVE-2014-2601"
],
"platforms_affected": [
"HP Integrated Lights-Out 2 (iLO2) 2.23"
],
"exploitability": "Unproven",
"consequences": "Denial of Service",
"references": [
{
"link_target": "https://h20564.www2.hp.com",
"link_name": "HPSBHF03006",
"description": "HP Integrated Lights-Out 2 (iLO 2) Denial of Service"
},
{
"link_target": "http://www.securityfocus.com/bid/67054",
"link_name": "BID-67054",
"description": "HP Integrated Lights-Out CVE-2014-2601 Remote Denial of Service Vulnerability"
},
{
"link_target": "http://secunia.com/advisories/58224",
"link_name": "SA58224",
"description": "HP Integrated Lights-Out 2 Denial of Service Vulnerability"
},
{
"link_target": "http://cve.mitre.org",
"link_name": "CVE-2014-2601",
"description": "The server in HP Integrated Lights-Out 2"
}
],
"report_confidence": "Confirmed",
"uuid": "7d71d8e3856c692cb73c4b7daf1c21ce"
}
]
MOCK_RECENT_CVE_RESP = [
{
"type": "vulnerability",
"xfdbid": 174800,
"updateid": 83006,
"inserted": True,
"variant": "single",
"title": "Resim Ara plugin for WordPress cross-site scripting",
"description": "Resim Ara plugin for WordPress is vulnerable to cross-site scripting,",
"risk_level": 6.1,
"cvss": {
"version": "3.0",
"privilegesrequired": "None",
"userinteraction": "Required",
"scope": "Changed",
"access_vector": "Network",
"access_complexity": "Low",
"confidentiality_impact": "Low",
"integrity_impact": "Low",
"availability_impact": "None",
"remediation_level": "Unavailable"
},
"temporal_score": 5.9,
"remedy": "No remedy available as of January 20, 2020.",
"remedy_fmt": "<P>No remedy available as of January 20, 2020.</P>",
"reported": "2020-01-16T00:00:00Z",
"tagname": "resimara-unknown-xss",
"platforms_affected": [
"WordPress Resim Ara plugin for WordPress 1.0"
],
"platforms_dependent": [
"WordPress WordPress"
],
"exploitability": "High",
"consequences": "Cross-Site Scripting",
"references": [
{
"link_target": "https://packetstormsecurity.com/files/155980",
"link_name": "Packet Storm Security [01-16-2020]",
"description": "WordPress Resim ara 1.0 Cross Site Scripting"
},
{
"link_target": "https://wordpress.org/plugins/resim-ara/",
"link_name": "WordPress Plugin Directory",
"description": "resim-ara"
}
],
"signatures": [
{
"coverage": "Cross_Site_Scripting",
"coverage_date": "2008-11-11T00:00:00Z"
}
],
"report_confidence": "Reasonable"
}
]
MOCK_HASH_RESP = {
"malware": {
"origins": {
"emails": {
},
"CnCServers": {
"rows": [
{
"type": "CnC",
"md5": "474B9CCF5AB9D72CA8A333889BBB34F0",
"domain": "pc-guard.net",
"firstseen": "2014-10-20T23:19:00Z",
"lastseen": "2014-10-20T23:19:00Z",
"ip": "61.255.239.86",
"count": 483,
"schema": "http",
"filepath": "v.html",
"origin": "CnC",
"uri": "http://pc-guard.net/v.html"
}
],
"count": 1
},
"downloadServers": {
},
"subjects": {
},
"external": {
"source": "reversingLabs",
"firstSeen": "2014-12-09T06:10:00Z",
"lastSeen": "2018-12-16T20:55:00Z",
"malwareType": "Trojan",
"platform": "Win32",
"detectionCoverage": 43,
"family": [
"badur"
]
}
},
"type": "md5",
"md5": "0x474B9CCF5AB9D72CA8A333889BBB34F0",
"hash": "0x474B9CCF5AB9D72CA8A333889BBB34F0",
"created": "2014-10-20T23:19:00Z",
"family": [
"tsunami"
],
"familyMembers": {
"tsunami": {
"count": 61
}
},
"risk": "high"
},
"tags": [
]
}
MOCK_HOST_RESP = {
"createdDate": "1997-09-15T07:00:00.000Z",
"updatedDate": "2019-09-09T15:39:04.000Z",
"expiresDate": "2028-09-13T07:00:00.000Z",
"contactEmail": "[email protected]",
"registrarName": "MarkMonitor, Inc.",
"contact": [
{
"type": "registrant",
"organization": "Google LLC",
"country": "United States"
}
],
"extended": {
"createdDate": "1997-09-15T07:00:00.000Z",
"updatedDate": "2019-09-09T15:39:04.000Z",
"expiresDate": "2028-09-13T07:00:00.000Z",
"contactEmail": "[email protected]",
"registrarName": "MarkMonitor, Inc.",
"contact": [
{
"type": "registrant",
"organization": "Google LLC",
"country": "United States"
}
]
}
}
MOCK_CVE_SEARCH_RESP = {'total_rows': 1,
'bookmark': 'g1AAAAMHeJzLYWBg4MhgTmFQTUlKzi9KdUhJstDLTMrVrUjLL0pONTAw1EvOyS9NScwr0ctLLckBKmdKZ',
'rows': [{'type': 'vulnerability',
'xfdbid': 161573,
'updateid': 66943,
'inserted': True,
'variant': 'single',
'title': 'wolfSSL DoPreSharedKeys function buffer overflow',
'description': 'wolfSSL is vulnerable to a buffer overflow.',
'risk_level': 9.8,
'cvss': {'version': '3.0',
'privilegesrequired': 'None',
'userinteraction': 'None',
'scope': 'Unchanged',
'access_vector': 'Network',
'access_complexity': 'Low',
'confidentiality_impact': 'High',
'integrity_impact': 'High',
'availability_impact': 'High',
'remediation_level': 'Official Fix'},
'temporal_score': 8.5,
'remedy': 'Refer to wolfssl GIT Repository for patch.',
'remedy_fmt': 'Refer to wolfssl GIT Repository for patch',
'reported': '2019-05-15T00:00:00Z',
'tagname': 'wolfssl-cve201911873-bo',
'stdcode': ['CVE-2019-11873'],
'platforms_affected': ['wolfSSL wolfSSL 4.0.0'],
'exploitability': 'Unproven',
'consequences': 'Gain Access',
'references': [{
'link_target': 'https://www.telekom.com/en/corporate-responsibility',
'link_name': 'Telekom Web site',
'description': 'Critical remote buffer overflow vulnerability in wolfSSL library'},
{'link_target': 'https://github.com/wolfSSL/wolfssl/pull/2239',
'link_name': 'wolfssl GIT Repository',
'description': 'add sanity check on length of PSK identity #2239'},
{'link_target': '',
'link_name': 'CVE-2019-11873',
'description': 'wolfSSL 4.0.0 has a Buffer Overflow.'}],
'report_confidence': 'Confirmed'}]}
def test_ip(requests_mock):
"""
Given: Arguments for ip command
When: The server response is complete
Then: validates the outputs
"""
requests_mock.get(f'{MOCK_BASE_URL}/ipr/{MOCK_IP}', json=MOCK_IP_RESP)
client = Client(MOCK_BASE_URL, MOCK_API_KEY, MOCK_PASSWORD, True, False)
args = {
'ip': MOCK_IP
}
_, outputs, _ = ip_command(client, args)
assert outputs[outputPaths['ip']][0]['Address'] == MOCK_IP
assert outputs[DBOT_SCORE_KEY][0] == MOCK_IP_RESP[DBOT_SCORE_KEY]
def test_ip_with_invalid_resp(requests_mock):
"""
Given: Arguments for ip command
When: The server response is not complete and some data fields are empty
Then: validates the outputs
"""
requests_mock.get(f'{MOCK_BASE_URL}/ipr/{MOCK_IP}', json=MOCK_INVALID_IP_RESP)
client = Client(MOCK_BASE_URL, MOCK_API_KEY, MOCK_PASSWORD, True, False)
args = {
'ip': MOCK_IP
}
md, outputs, reports = ip_command(client, args)
assert outputs[outputPaths['ip']][0]['Address'] == MOCK_IP
assert reports[0] == {'ip': '8.8.8.8', 'history': [], 'subnets': [], 'cats': {}, 'score': 1, 'tags': []}
assert md == """### X-Force IP Reputation for: 8.8.8.8
https://exchange.xforce.ibmcloud.com/ip/8.8.8.8
|Reason|Score|
|---|---|
| Reason not found. | 1 |
"""
def test_url(requests_mock):
requests_mock.get(f'{MOCK_BASE_URL}/url/{MOCK_URL}', json=MOCK_URL_RESP)
client = Client(MOCK_BASE_URL, MOCK_API_KEY, MOCK_PASSWORD, True, False)
args = {
'url': MOCK_URL
}
_, outputs, _ = url_command(client, args)
assert outputs[outputPaths['url']][0]['Data'] == MOCK_URL
assert outputs[DBOT_SCORE_KEY][0] == MOCK_URL_RESP[DBOT_SCORE_KEY]
def test_get_cve(requests_mock):
requests_mock.get(f'{MOCK_BASE_URL}/vulnerabilities/search/{MOCK_CVE}', json=MOCK_CVE_RESP)
client = Client(MOCK_BASE_URL, MOCK_API_KEY, MOCK_PASSWORD, True, False)
args = {
'cve_id': MOCK_CVE
}
_, outputs, _ = cve_get_command(client, args)
assert outputs[outputPaths['cve']][0]['ID'] == MOCK_CVE
assert outputs[DBOT_SCORE_KEY][0]['Indicator'] == MOCK_CVE, 'The indicator is not matched'
assert outputs[DBOT_SCORE_KEY][0]['Type'] == 'cve', 'The indicator type should be cve'
assert 1 <= outputs[DBOT_SCORE_KEY][0]['Score'] <= 3, 'Invalid indicator score range'
assert outputs[DBOT_SCORE_KEY][0]['Reliability'] == 'C - Fairly reliable'
def test_cve_latest(requests_mock):
requests_mock.get(f'{MOCK_BASE_URL}/vulnerabilities', json=MOCK_RECENT_CVE_RESP)
client = Client(MOCK_BASE_URL, MOCK_API_KEY, MOCK_PASSWORD, True, False)
_, outputs, _ = cve_search_command(client, {})
assert len(outputs[outputPaths['cve']]) == 1, 'CVE output length should be 1'
def test_file(requests_mock):
"""
Given:
- A hash.
When:
- When running the file command.
Then:
- Validate that the file outputs are created properly
- Validate that the DbotScore outputs are created properly
"""
dbot_score_key = 'DBotScore(val.Indicator && val.Indicator == obj.Indicator &&' \
' val.Vendor == obj.Vendor && val.Type == obj.Type)'
requests_mock.get(f'{MOCK_BASE_URL}/malware/{MOCK_HASH}', json=MOCK_HASH_RESP)
client = Client(MOCK_BASE_URL, MOCK_API_KEY, MOCK_PASSWORD, True, False)
outputs = file_command(client, {'file': MOCK_HASH})[0].to_context()['EntryContext']
file_key = next(filter(lambda k: 'File' in k, outputs.keys()), 'File')
assert outputs[file_key][0].get('MD5', '') == MOCK_HASH, 'The indicator value is wrong'
assert outputs[dbot_score_key][0]['Indicator'] == MOCK_HASH, 'The indicator is not matched'
assert outputs[dbot_score_key][0]['Type'] == 'file', 'The indicator type should be file'
assert 1 <= outputs[dbot_score_key][0]['Score'] <= 3, 'Invalid indicator score range'
def test_file_connections(requests_mock):
"""
Given:
- A hash.
When:
- When running the file command.
Then:
- Validate that the relationships are created correctly
"""
requests_mock.get(f'{MOCK_BASE_URL}/malware/{MOCK_HASH}', json=MOCK_HASH_RESP)
client = Client(MOCK_BASE_URL, MOCK_API_KEY, MOCK_PASSWORD, True, False)
relations = file_command(client, {'file': MOCK_HASH})[0].relationships[0].to_context()
assert relations.get('Relationship') == 'related-to'
assert relations.get('EntityA') == MOCK_HASH
assert relations.get('EntityAType') == 'File'
assert relations.get('EntityB') == 'badur'
assert relations.get('EntityBType') == 'STIX Malware'
def test_whois(requests_mock):
requests_mock.get(f'{MOCK_BASE_URL}/whois/{MOCK_HOST}', json=MOCK_HOST_RESP)
client = Client(MOCK_BASE_URL, MOCK_API_KEY, MOCK_PASSWORD, True, False)
_, outputs, _ = whois_command(client, {'host': MOCK_HOST})
whois_result = outputs['XFE.Whois(obj.Host==val.Host)']
assert whois_result['Host'] == MOCK_HOST, 'The host from output is different'
assert isinstance(whois_result['Contact'], list), 'Contact information should be list'
def test_cve_search(requests_mock):
requests_mock.get(f'{MOCK_BASE_URL}/vulnerabilities/fulltext?q={MOCK_CVE_QUERY}', json=MOCK_CVE_SEARCH_RESP)
client = Client(MOCK_BASE_URL, MOCK_API_KEY, MOCK_PASSWORD, True, False)
_, outputs, _ = cve_search_command(client, {'q': MOCK_CVE_QUERY})
assert outputs['XFE.CVESearch']['TotalRows'] == len(outputs[outputPaths['cve']]), 'Mismatch rows and outputs'
|
the-stack_106_24948 | #!/usr/bin/env python
# coding:utf8
# Copyright (c) 2018, Tencent. All rights reserved
# Implement FastText
# Reference "Bag of Tricks for Efficient Text Classification"
# https://github.com/facebookresearch/fastText
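# A note on the approach (a description of the code below, not an official
# summary of the paper): each feature's word and optional n-gram embeddings
# are summed, divided by the real sequence lengths to form a mean document
# vector, then passed through two fully connected layers and a softmax
# classifier inside _model_fn.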
import tensorflow as tf
from model.embedding_layer import EmbeddingLayer
from model.model_helper import ModelHelper
class FastTextEstimator(tf.estimator.Estimator):
def __init__(self, data_processor, model_params):
config = data_processor.config
logger = data_processor.logger
embedding_layer = EmbeddingLayer(config, logger=logger)
model_helper = ModelHelper(config, logger=logger)
def _model_fn(features, labels, mode, params):
self._check(params["feature_names"], data_processor)
input_layer = []
len_list = []
for feature_name in params["feature_names"]:
index = data_processor.dict_names.index(feature_name)
input_layer.append(embedding_layer.get_vocab_embedding_sparse(
feature_name, features["var_len_" + feature_name],
len(data_processor.dict_list[index]), params["epoch"],
pretrained_embedding_file=
data_processor.pretrained_embedding_files[index],
dict_map=data_processor.dict_list[index],
mode=mode))
len_list.append(features[feature_name + "_var_real_len"])
if data_processor.ngram_list[index] > 1:
ngram_name = feature_name + "_ngram"
index = data_processor.dict_names.index(ngram_name)
input_layer.append(
embedding_layer.get_vocab_embedding_sparse(
ngram_name, features["var_len_" + ngram_name],
len(data_processor.dict_list[index]),
params["epoch"],
mode=mode))
len_list.append(features[ngram_name + "_var_real_len"])
hidden_layer = input_layer[0]
total_len = len_list[0]
for i in range(1, len(input_layer)):
hidden_layer = hidden_layer + input_layer[i]
total_len = total_len + len_list[i]
hidden_layer = tf.div(hidden_layer, total_len)
hidden_layer = tf.contrib.layers.fully_connected(
inputs=hidden_layer, num_outputs=256, activation_fn=tf.nn.relu)
hidden_layer = tf.contrib.layers.fully_connected(
inputs=hidden_layer, num_outputs=config.embedding_layer.embedding_dimension, activation_fn=tf.nn.relu)
if mode == tf.estimator.ModeKeys.TRAIN:
hidden_layer = model_helper.dropout(
hidden_layer, config.train.hidden_layer_dropout_keep_prob)
return model_helper.get_softmax_estimator_spec(
hidden_layer, mode, labels, params["label_size"],
params["static_embedding"], data_processor.label_dict_file)
super(FastTextEstimator, self).__init__(
model_fn=_model_fn, model_dir=config.model_common.checkpoint_dir,
config=model_helper.get_run_config(), params=model_params)
@staticmethod
def _check(feature_names, data_processor):
for feature_name in feature_names:
assert feature_name in data_processor.dict_names
index = data_processor.dict_names.index(feature_name)
assert len(data_processor.dict_list[index]) > 0
|
the-stack_106_24951 | from datetime import datetime, timedelta
from pytz import timezone, UTC
import unittest2
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.event_type import EventType
from helpers.season_helper import SeasonHelper
from models.event import Event
class TestSeasonHelper(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
def tearDown(self):
self.testbed.deactivate()
def test_effective_season_year_no_events(self):
now = datetime.now()
self.assertEqual(SeasonHelper.effective_season_year(), now.year)
def test_effective_season_year_this_year(self):
# Effective season should be this year
today = datetime.today()
Event(
id="{}testendstomorrow".format(today.year),
end_date=today + timedelta(days=1),
event_short="testendstomorrow",
event_type_enum=EventType.REGIONAL,
first_eid="5561",
name="Test Event (Ends Tomorrow)",
start_date=today,
year=today.year,
venue_address="123 Fake Street, Anytown, MI, USA",
website="http://www.google.com"
).put()
self.assertEqual(SeasonHelper.effective_season_year(), today.year)
def test_effective_season_year_next_year(self):
# Effective season should be next year
today = datetime.today()
Event(
id="{}testended".format(today.year),
end_date=today - timedelta(days=1),
event_short="testended",
event_type_enum=EventType.REGIONAL,
first_eid="5561",
name="Test Event (Ends Tomorrow)",
start_date=today - timedelta(days=2),
year=today.year,
venue_address="123 Fake Street, Anytown, MI, USA",
website="http://www.google.com"
).put()
self.assertEqual(SeasonHelper.effective_season_year(), today.year + 1)
def test_effective_season_year_next_year_ignore_non_official(self):
# Effective season should be next year
today = datetime.today()
# Insert an event that has already happened - otherwise we'll default to the current season
# This is to simulate offseason
Event(
id="{}testended".format(today.year),
end_date=today - timedelta(days=1),
event_short="testended",
event_type_enum=EventType.REGIONAL,
first_eid="5561",
name="Test Event (Ends Tomorrow)",
start_date=today - timedelta(days=2),
year=today.year,
venue_address="123 Fake Street, Anytown, MI, USA",
website="http://www.google.com"
).put()
Event(
id="{}testendstomorrow".format(today.year),
end_date=today + timedelta(days=1),
event_short="testendstomorrow",
event_type_enum=EventType.OFFSEASON,
first_eid="5561",
name="Test Event (Ends Tomorrow)",
start_date=today,
year=today.year,
venue_address="123 Fake Street, Anytown, MI, USA",
website="http://www.google.com"
).put()
self.assertEqual(SeasonHelper.effective_season_year(), today.year + 1)
def test_is_kickoff_at_least_one_day_away(self):
a = datetime(2020, 1, 3, 14, 30, 00, tzinfo=UTC) # False - over one day
b = datetime(2020, 1, 3, 15, 30, 00, tzinfo=UTC) # True - exactly one day
c = datetime(2020, 1, 4, 15, 30, 00, tzinfo=UTC) # True - same time
d = datetime(2020, 2, 4, 15, 30, 00, tzinfo=UTC) # True - very far away in the future
expected_results = [False, True, True, True]
for (date, result) in zip([a, b, c, d], expected_results):
at_least_one_day_away = SeasonHelper.is_kickoff_at_least_one_day_away(date, 2020)
self.assertEqual(at_least_one_day_away, result)
def test_kickoff_datetime(self):
# 2011 - Saturday the 8th (https://en.wikipedia.org/wiki/Logo_Motion)
kickoff_2011 = datetime(2011, 1, 8, 10, 30, 00, tzinfo=timezone('EST'))
kickoff_2011_utc = kickoff_2011.astimezone(UTC)
self.assertEqual(SeasonHelper.kickoff_datetime_est(year=2011), kickoff_2011)
self.assertEqual(SeasonHelper.kickoff_datetime_utc(year=2011), kickoff_2011_utc)
# 2010 - Saturday the 9th (https://en.wikipedia.org/wiki/Breakaway_(FIRST))
kickoff_2010 = datetime(2010, 1, 9, 10, 30, 00, tzinfo=timezone('EST'))
kickoff_2010_utc = kickoff_2010.astimezone(UTC)
self.assertEqual(SeasonHelper.kickoff_datetime_est(year=2010), kickoff_2010)
self.assertEqual(SeasonHelper.kickoff_datetime_utc(year=2010), kickoff_2010_utc)
# 2009 - Saturday the 3rd (https://en.wikipedia.org/wiki/Lunacy_(FIRST))
kickoff_2009 = datetime(2009, 1, 3, 10, 30, 00, tzinfo=timezone('EST'))
kickoff_2009_utc = kickoff_2009.astimezone(UTC)
self.assertEqual(SeasonHelper.kickoff_datetime_est(year=2009), kickoff_2009)
self.assertEqual(SeasonHelper.kickoff_datetime_utc(year=2009), kickoff_2009_utc)
|
the-stack_106_24952 | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
if __name__ == "__main__":
lst = list(map(int, input().split()))
i_max = lst.index(max(lst))
lst[i_max], lst[0] = lst[0], lst[i_max]
print(*lst)
|
the-stack_106_24953 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test callback function."""
import numpy as np
import os
import pytest
import stat
import mindspore.common.dtype as mstype
import mindspore.nn as nn
from mindspore.common.api import ms_function
from mindspore.common.tensor import Tensor
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import Momentum
from mindspore.train.callback import ModelCheckpoint, _check_file_name_prefix, RunContext, _checkpoint_cb_for_save_op, \
LossMonitor, _InternalCallbackParam, _chg_ckpt_file_name_if_same_exist, \
_build_callbacks, CheckpointConfig, _set_cur_net
class Net(nn.Cell):
"""Net definition."""
def __init__(self):
super(Net, self).__init__()
self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal')
self.bn = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.flatten = nn.Flatten()
self.fc = nn.Dense(64 * 222 * 222, 3)
@ms_function
def construct(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
x = self.flatten(x)
out = self.fc(x)
return out
class LossNet(nn.Cell):
""" LossNet definition """
def __init__(self):
super(LossNet, self).__init__()
self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal', pad_mode='valid')
self.bn = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.flatten = nn.Flatten()
self.fc = nn.Dense(64 * 222 * 222, 3) # padding=0
self.loss = nn.SoftmaxCrossEntropyWithLogits()
@ms_function
def construct(self, x, y):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
x = self.flatten(x)
x = self.fc(x)
out = self.loss(x, y)
return out
def test_Model_Checkpoint_prefix_invalid():
"""Test ModelCheckpoint prefix invalid."""
with pytest.raises(ValueError):
ModelCheckpoint(123)
ModelCheckpoint(directory="./")
with pytest.raises(TypeError):
ModelCheckpoint(config='type_error')
ModelCheckpoint(config=CheckpointConfig())
ModelCheckpoint(prefix="ckpt_2", directory="./test_files")
def test_save_checkpoint():
"""Test save checkpoint."""
train_config = CheckpointConfig(
save_checkpoint_steps=16,
save_checkpoint_seconds=0,
keep_checkpoint_max=5,
keep_checkpoint_per_n_minutes=0)
cb_params = _InternalCallbackParam()
net = Net()
loss = nn.SoftmaxCrossEntropyWithLogits()
optim = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
network_ = WithLossCell(net, loss)
_train_network = TrainOneStepCell(network_, optim)
cb_params.train_network = _train_network
cb_params.epoch_num = 10
cb_params.cur_epoch_num = 5
cb_params.cur_step_num = 0
cb_params.batch_num = 32
ckpoint_cb = ModelCheckpoint(prefix="test_ckpt", directory='./test_files', config=train_config)
run_context = RunContext(cb_params)
ckpoint_cb.begin(run_context)
ckpoint_cb.step_end(run_context)
if os.path.exists('./test_files/test_ckpt-model.pkl'):
os.chmod('./test_files/test_ckpt-model.pkl', stat.S_IWRITE)
os.remove('./test_files/test_ckpt-model.pkl')
def test_loss_monitor_sink_mode():
"""Test loss monitor sink mode."""
cb_params = _InternalCallbackParam()
cb_params.cur_epoch_num = 4
cb_params.cur_step_num = 2
cb_params.batch_num = 2
cb_params.net_outputs = Tensor(2.0)
run_context = RunContext(cb_params)
loss_cb = LossMonitor(1)
callbacks = [loss_cb]
callbacklist = _build_callbacks(callbacks)
callbacklist.begin(run_context)
callbacklist.epoch_begin(run_context)
callbacklist.step_begin(run_context)
callbacklist.step_end(run_context)
callbacklist.epoch_end(run_context)
callbacklist.end(run_context)
def test_loss_monitor_normal_mode():
"""Test loss monitor normal(non-sink) mode."""
cb_params = _InternalCallbackParam()
run_context = RunContext(cb_params)
loss_cb = LossMonitor(1)
cb_params.cur_epoch_num = 4
cb_params.cur_step_num = 1
cb_params.batch_num = 1
cb_params.net_outputs = Tensor(2.0)
loss_cb.begin(run_context)
loss_cb.epoch_begin(run_context)
loss_cb.step_begin(run_context)
loss_cb.step_end(run_context)
loss_cb.epoch_end(run_context)
loss_cb.end(run_context)
def test_check_file_name_not_str():
"""Test check file name not str."""
ret = _check_file_name_prefix(1)
assert not ret
def test_check_file_name_back_err():
"""Test check file name back err."""
ret = _check_file_name_prefix('abc.')
assert ret
def test_check_file_name_one_alpha():
"""Test check file name one alpha."""
ret = _check_file_name_prefix('a')
assert ret
ret = _check_file_name_prefix('_')
assert ret
def test_check_file_name_err():
"""Test check file name err."""
ret = _check_file_name_prefix('_123')
assert ret
def test_chg_ckpt_file_name_if_same_exist():
"""Test chg ckpt file name if same exist."""
_chg_ckpt_file_name_if_same_exist(directory="./test_files", prefix="ckpt")
def test_checkpoint_cb_for_save_op():
"""Test checkpoint cb for save op."""
parameter_list = []
one_param = {}
one_param['name'] = "conv1.weight"
one_param['data'] = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]), dtype=mstype.float32)
parameter_list.append(one_param)
_checkpoint_cb_for_save_op(parameter_list)
def test_checkpoint_cb_for_save_op_update_net():
"""Test checkpoint cb for save op."""
parameter_list = []
one_param = {}
one_param['name'] = "conv.weight"
one_param['data'] = Tensor(np.ones(shape=(64, 3, 3, 3)), dtype=mstype.float32)
parameter_list.append(one_param)
net = Net()
_set_cur_net(net)
_checkpoint_cb_for_save_op(parameter_list)
assert net.conv.weight.default_input.asnumpy()[0][0][0][0] == 1
def test_internal_callback_param():
"""Test Internal CallbackParam."""
cb_params = _InternalCallbackParam()
cb_params.member1 = 1
cb_params.member2 = "abc"
assert cb_params.member1 == 1
assert cb_params.member2 == "abc"
def test_checkpoint_save_ckpt_steps():
"""Test checkpoint save ckpt steps."""
train_config = CheckpointConfig(
save_checkpoint_steps=16,
save_checkpoint_seconds=0,
keep_checkpoint_max=5,
keep_checkpoint_per_n_minutes=0)
ckpt_cb = ModelCheckpoint(config=train_config)
cb_params = _InternalCallbackParam()
net = Net()
loss = nn.SoftmaxCrossEntropyWithLogits()
optim = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
network_ = WithLossCell(net, loss)
_train_network = TrainOneStepCell(network_, optim)
cb_params.train_network = _train_network
cb_params.epoch_num = 10
cb_params.cur_epoch_num = 5
cb_params.cur_step_num = 160
cb_params.batch_num = 32
run_context = RunContext(cb_params)
ckpt_cb.begin(run_context)
ckpt_cb.step_end(run_context)
ckpt_cb2 = ModelCheckpoint(config=train_config)
cb_params.cur_epoch_num = 1
cb_params.cur_step_num = 15
ckpt_cb2.begin(run_context)
ckpt_cb2.step_end(run_context)
def test_checkpoint_save_ckpt_seconds():
"""Test checkpoint save ckpt seconds."""
train_config = CheckpointConfig(
save_checkpoint_steps=16,
save_checkpoint_seconds=100,
keep_checkpoint_max=0,
keep_checkpoint_per_n_minutes=1)
ckpt_cb = ModelCheckpoint(config=train_config)
cb_params = _InternalCallbackParam()
net = Net()
loss = nn.SoftmaxCrossEntropyWithLogits()
optim = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
network_ = WithLossCell(net, loss)
_train_network = TrainOneStepCell(network_, optim)
cb_params.train_network = _train_network
cb_params.epoch_num = 10
cb_params.cur_epoch_num = 4
cb_params.cur_step_num = 128
cb_params.batch_num = 32
run_context = RunContext(cb_params)
ckpt_cb.begin(run_context)
ckpt_cb.step_end(run_context)
ckpt_cb2 = ModelCheckpoint(config=train_config)
cb_params.cur_epoch_num = 1
cb_params.cur_step_num = 16
ckpt_cb2.begin(run_context)
ckpt_cb2.step_end(run_context)
def test_build_callbacks():
"""Test_build_callbacks."""
ck_obj = ModelCheckpoint()
loss_cb_1 = LossMonitor(1)
callbacks = [None]
with pytest.raises(TypeError):
callbacks = _build_callbacks(callbacks)
callbacks = ['Error']
with pytest.raises(TypeError):
callbacks = _build_callbacks(callbacks)
callbacks = [ck_obj, loss_cb_1, 'Error', None]
with pytest.raises(TypeError):
callback_list = _build_callbacks(callbacks)
def test_RunContext():
"""Test RunContext."""
context_err = 666
with pytest.raises(TypeError):
context = RunContext(context_err)
cb_params = _InternalCallbackParam()
cb_params.member1 = 1
cb_params.member2 = "abc"
run_context = RunContext(cb_params)
run_context.original_args()
assert cb_params.member1 == 1
assert cb_params.member2 == "abc"
run_context.request_stop()
should_stop = run_context.get_stop_requested()
assert should_stop
def test_Checkpoint_Config():
"""Test CheckpointConfig all None or 0."""
with pytest.raises(ValueError):
CheckpointConfig(0, 0, 0, 0, True)
with pytest.raises(ValueError):
CheckpointConfig(0, None, 0, 0, True)
def test_step_end_save_graph():
"""Test save checkpoint."""
train_config = CheckpointConfig(
save_checkpoint_steps=16,
save_checkpoint_seconds=0,
keep_checkpoint_max=5,
keep_checkpoint_per_n_minutes=0)
cb_params = _InternalCallbackParam()
net = LossNet()
input_data = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]).astype(np.float32))
input_label = Tensor(np.random.randint(0, 3, [1, 3]).astype(np.float32))
net(input_data, input_label)
cb_params.train_network = net
cb_params.epoch_num = 10
cb_params.cur_epoch_num = 5
cb_params.cur_step_num = 0
cb_params.batch_num = 32
ckpoint_cb = ModelCheckpoint(prefix="test", directory='./test_files', config=train_config)
run_context = RunContext(cb_params)
ckpoint_cb.begin(run_context)
# import pdb;pdb.set_trace()
ckpoint_cb.step_end(run_context)
assert os.path.exists('./test_files/test-graph.meta') == True
if os.path.exists('./test_files/test-graph.meta'):
os.chmod('./test_files/test-graph.meta', stat.S_IWRITE)
os.remove('./test_files/test-graph.meta')
ckpoint_cb.step_end(run_context)
assert os.path.exists('./test_files/test-graph.meta') == False
|
the-stack_106_24955 | import unittest
import numpy as np
from chariot.transformer.vocabulary import Vocabulary
from gcn.graph import DependencyGraph, SimilarityGraph, StaticGraph
from gcn.visualize.draw import AttentionDrawer
class TestDraw(unittest.TestCase):
def test_draw_dependency_graph(self):
sentence = "I am living at house"
graph_builder = DependencyGraph("en")
attention = np.array([
[0, 0, 1, 0, 0],
[0, 0, 0.2, 0, 0],
[0, 0, 0.7, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0.5, 0],
])
drawer = AttentionDrawer(graph_builder)
graph = drawer.draw(sentence, attention)
drawer.show(graph)
def test_draw_similarity_graph(self):
sentence = "I am building similarity graph structure"
graph_builder = SimilarityGraph("en")
drawer = AttentionDrawer(graph_builder)
graph = drawer.draw(sentence)
drawer.show(graph)
def test_draw_static_graph(self):
sentence = "I am static graph"
graph_builder = StaticGraph("en", kind="previous")
drawer = AttentionDrawer(graph_builder)
graph = drawer.draw(sentence)
drawer.show(graph)
|
the-stack_106_24956 | # encoding: utf-8
import math
import torch
from torch import nn
from .AIBN import AIBNorm2d
from clustercontrast.models.pooling import build_pooling_layer
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class SEBlock(nn.Module):
def __init__(self,channel,r=16):
super(SEBlock,self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc=nn.Sequential(nn.Linear(channel,channel//r,bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel//r,channel,bias=False),
nn.Sigmoid(),)
def forward(self,x):
b, c, _, _ = x.size()
# Squeeze
y = self.avg_pool(x).view(b,c) # shape(b,c)
# Excitation
y = self.fc(y).view(b,c,1,1) # shape(b,c,1,1)
# Fscale
y = torch.mul(x,y) # shape(b,c,16,8)
return y
class VoxelAtention(nn.Module):
def __init__(self,channel,r=16):
super(VoxelAtention,self).__init__()
# Spatial attention
self.sigmoid = nn.Sigmoid()
# Channel attention
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(nn.Linear(channel, channel // r, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // r, channel, bias=False),
nn.Sigmoid(), )
def forward(self, x):
# spatial attention
avg_out = torch.mean(x, dim=1, keepdim=True) # along the channel dim calculate avg
spatial_out = self.sigmoid(avg_out)
# channel attention
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c) # Squeeze
y = self.fc(y).view(b, c, 1, 1) # Excitation
y = torch.mul(spatial_out, y) # Fscale
# add attention to input
y = torch.mul(x,y)
return y
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class AIBNBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, adaptive_weight=None, generate_weight=True):
super(AIBNBottleneck, self).__init__()
if adaptive_weight is None: # alpha can be learned
self.adaptive_weight = nn.Parameter(torch.ones(1) * 0.1)
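# This single learnable weight is shared by bn1/bn2/bn3 below and, when a
# downsample branch exists, by its AIBNorm2d as well (assigned further down
# in this constructor), so one adaptive coefficient governs every
# normalization layer in the bottleneck.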
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = AIBNorm2d(
planes, adaptive_weight=self.adaptive_weight, generate_weight=generate_weight)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = AIBNorm2d(
planes, adaptive_weight=self.adaptive_weight, generate_weight=generate_weight)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = AIBNorm2d(
planes * 4, adaptive_weight=self.adaptive_weight, generate_weight=generate_weight)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
if self.downsample is not None:
self.downsample[1].adaptive_weight = self.adaptive_weight
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class AIBNResNet(nn.Module):
def __init__(self, last_stride=2, block=AIBNBottleneck, layers=[3, 4, 6, 3],pooling_type='avg'):
self.inplanes = 64
super(AIBNResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True) # add missed relu
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer_normal(Bottleneck, 64, layers[0])
self.layer2 = self._make_layer_normal(Bottleneck, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, adaptive_weight=None)
self.layer4 = self._make_layer(block, 512, layers[3], stride=last_stride, adaptive_weight=None)
# self.seblock = SEBlock(256*4) # SEBlock added by yxt
# self.voxel_attention=VoxelAtention(512*4)
# self.va_0=VoxelAtention(64)
# self.va_1=VoxelAtention(64*4)
# self.va_2=VoxelAtention(128*4)
# self.va_3=VoxelAtention(256*4)
# self.va_4=VoxelAtention(512*4)
# self.adaptive_pool = nn.AdaptiveAvgPool2d((1, 1))
self.adaptive_pool = build_pooling_layer(pooling_type)
def _make_layer(self, block, planes, blocks, stride=1, adaptive_weight=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
AIBNorm2d(planes * block.expansion,
adaptive_weight=adaptive_weight, generate_weight=True),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample,
adaptive_weight=adaptive_weight, generate_weight=True))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
if i == (blocks - 1):
layers.append(block(
self.inplanes, planes, adaptive_weight=adaptive_weight, generate_weight=True))
else:
layers.append(block(
self.inplanes, planes, adaptive_weight=adaptive_weight, generate_weight=True))
return nn.Sequential(*layers)
def _make_layer_normal(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x) # add missed relu
x = self.maxpool(x)
# x = self.va_0(x) # <------
x = self.layer1(x)
# x = self.va_1(x) # <----
x = self.layer2(x)
# x = self.va_2(x) # <-----
x = self.layer3(x)
# x = self.seblock(x) # added by yxt
# x = self.va_3(x) # <----- change voxel attention position
x = self.layer4(x)
# x =self.voxel_attention(x) # Voxel attention added by yxt
x = self.adaptive_pool(x)
return x
def load_param(self, model_path):
param_dict = torch.load(model_path)
for i in param_dict:
if 'fc' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
def random_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
if __name__ == '__main__':
resnet = AIBNResNet(last_stride=1,
block=AIBNBottleneck,
layers=[3, 4, 6, 3])
print(resnet.state_dict().keys())
|
the-stack_106_24957 | import json
import logging
import redis
from tsa.extensions import redis_pool
def query_dataset(iri):
return {
"related": query_related(iri),
"profile": query_profile(iri)
}
def query_related(ds_iri):
key = f'distrquery:{ds_iri}'
red = redis.Redis(connection_pool=redis_pool)
try:
return json.loads(red.get(key))
except TypeError:
return []
def query_profile(ds_iri):
key = f'dsanalyses:{ds_iri}'
red = redis.Redis(connection_pool=redis_pool)
log = logging.getLogger(__name__)
analysis = json.loads(red.get(key)) # raises TypeError if key is missing
supported_languages = ["cs", "en"]
output = {}
output["triples"] = analysis["generic"]["triples"]
output["classes"] = []
for cls in analysis["generic"]["classes"].keys():
iri = cls
count = analysis["generic"]["classes"][cls]
label = create_labels(iri, supported_languages)
output["classes"].append({'iri': iri, 'count': count, 'label': label})
output["predicates"] = []
for pred in analysis["generic"]["predicates"].keys():
output["predicates"].append({
'iri': pred,
'count': analysis["generic"]["predicates"][pred]
})
output["concepts"] = []
if "concepts" in analysis["skos"]:
for concept in analysis["skos"]["concepts"].keys():
output["concepts"].append({
'iri': concept,
'label': create_labels(concept, supported_languages)
})
output["schemata"] = []
for schema in analysis["skos"]["schema"].keys():
output["schemata"].append({
'iri': schema,
'label': create_labels(schema, supported_languages)
})
dimensions, measures = set(), set()
datasets = analysis["cube"]["datasets"]
for ds in datasets.keys():
dimensions.update(datasets[ds]["dimensions"])
measures.update(datasets[ds]["measures"])
output["dimensions"], output["measures"] = [], []
for d in dimensions:
output["dimensions"].append({
'iri': d,
'label': create_labels(d, supported_languages)
})
for m in measures:
output["measures"].append({
'iri': m,
'label': create_labels(m, supported_languages)
})
return output
def create_labels(ds_iri, tags):
labels = query_label(ds_iri)
label = {}
for tag in tags:
label[tag] = ""
available = set()
if "default" in labels.keys():
for tag in tags:
label[tag] = labels["default"]
available.add(tag)
for tag in tags:
if tag in labels.keys():
label[tag] = labels[tag]
available.add(tag)
available = list(available)
if len(available) > 0:
for tag in tags:
if len(label[tag]) == 0:
label[tag] = label[available[0]] # put anything there
else:
log = logging.getLogger(__name__)
log.error(f'Missing labels for {ds_iri}')
return label
def query_label(ds_iri):
#LABELS: key = f'dstitle:{ds!s}:{t.language}' if t.language is not None else f'dstitle:{ds!s}'
#red.set(key, title)
red = redis.Redis(connection_pool=redis_pool)
log = logging.getLogger(__name__)
result = {}
for x in red.keys(f'dstitle:{ds_iri!s}*'):
prefix_lang = f'dstitle:{ds_iri!s}:'
if x.startswith(prefix_lang):
language_code = x[len(prefix_lang):]
title = red.get(x)
result[language_code] = title
else:
result['default'] = red.get(x)
return result
|
the-stack_106_24960 | import numpy as np
from scipy import integrate
from reactorPhysics import reactorSystem
from reactorPhysics import qFuel
from reactorPhysics import rho
#import matplotlib.pyplot as pl
import time
class LegoReactor(object):
"""
Provides methods to interact with the point kinetics model.
The reactor system state vector:
S = [neutrons/cc, precursors/cc, fuelT, coolantT, rodPosition]
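Example (an illustrative sketch only; the test() function at the
bottom of this module exercises the same calls):
    reactor = LegoReactor()
    reactor.setRodPosition(50.)  # request rods 50% withdrawn
    reactor.timeStep()           # advance the system by tstep seconds
    power_MW = qFuel(reactor.S[0]) / 1.e6  # core power in MW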
"""
def __init__(self, initialSystemState=[5.e7, 5.e7, 450., 450., 0.], tstep=0.01):
""" Initilize reactor system state """
self.S = initialSystemState
self.reactivity = rho(self.S, 0, 0, 0)
self.tstep = tstep
self.t = np.array([0, self.tstep])
self.hrate = 0.0 # rod movement rate [% / s]
self.rodSetPoint = 0.0 # initial rod setpoint [%]
self.mdotC = 1000.e3 # coolant flow rate [g / s]
self.coolantSetPoint = 1000.e3
self.pwrCtrl = False
self.scramToggle = False
# For Storage/Plotting
self.maxTime = 100. # maximum time storage history [s]
dataStorLength = int(self.maxTime / self.tstep)
self.time = np.zeros(dataStorLength)
self.storVals = np.zeros((5, dataStorLength))
def timeStep(self):
""" Step reactor system forward in time """
self.__preStep()
self.S = integrate.odeint(reactorSystem, self.S, self.t,
args=(self.hrate, self.tstep, self.mdotC))[-1]
self.reactivity = rho(self.S, 0, 0, 0)
self.t += self.tstep
self.storVals = np.roll(self.storVals, -1, axis=1)
self.time = np.roll(self.time, -1)
self.time[-1] = self.t[-1]
self.storVals[:, -1] = np.array([self.S])
def __preStep(self):
"""
Check for valid rod movements or SCRAM condition
"""
if self.pwrCtrl:
self.__controlPID()
else:
self.__rodCtrl()
if self.hrate < 0 and self.S[4] <= 0.:
# do not allow control rods below 0
self.hrate = 0.
elif self.hrate > 0 and self.S[4] >= 100.:
self.hrate = 0.
self.__controlCoolantRate()
self.__scramCheck()
if self.scramToggle:
# Insert control rods all the way
self.S[4] = 0.
self.hrate = 0.
def __scramCheck(self):
"""
Check for conditions which require us to SCRAM
"""
if self.S[2] > 1700:
# Fuel temp scram (Temp in Kelvin)
print("Fuel Temperature SCRAM setpoint Exceeded")
self.SCRAM()
elif self.S[3] > 700:
# Coolant temp scram
print("Coolant Temperature SCRAM setpoint Exceeded")
self.SCRAM()
else:
pass
def setTimeStep(self, tstep):
self.tstep = tstep
def setRodRate(self, rodRate):
if not self.pwrCtrl:
self.hrate = rodRate
def setRodPosition(self, rodPos):
self.rodSetPoint = rodPos
def setCoolantRate(self, mdotCin):
self.coolantSetPoint = mdotCin
def __controlCoolantRate(self):
diff = (self.coolantSetPoint - self.mdotC) / 10.
fnDiff = np.tanh(1.0 * abs(diff)) # Relax control rod into position
if self.coolantSetPoint > self.mdotC:
self.mdotC += 1. / self.tstep * fnDiff
elif self.coolantSetPoint < self.mdotC:
self.mdotC -= 1. / self.tstep * fnDiff
else:
pass
def togglePwrCtrl(self, pwrSet, pwrCtrlToggle=True):
"""
Set power in MW
"""
self.pwrSet = pwrSet
self.pwrCtrl = pwrCtrlToggle
self.pidBias = 0.0
self.hrate = 0.0
def __controlPID(self):
maxRate = 0.60 # maximum rod movement rate in %/s
Kp = 0.0100000 # Proportional tunable const
Ki = 0.0001000 # Integral tunable const
Kd = 0.0001000 # Derivative tunable const
currentpwr = qFuel(self.S[0]) / 1.e6
errorFn = self.pwrSet - qFuel(self.storVals[0, :]) / 1.e6
errorIntegral = np.sum(errorFn[-100:]) # base integral error on past 100 values
errorDerivative = (errorFn[-1] - errorFn[-2]) / (self.tstep)
if hasattr(self, 'pwrSet'):
pidOut = self.pidBias + Kp * (self.pwrSet - currentpwr) + Ki * errorIntegral + Kd * errorDerivative
self.hrate = pidOut
if abs(self.hrate) > maxRate:
self.hrate = maxRate * (self.hrate / abs(self.hrate))
else:
self.togglePwrCtrl(qFuel(self.S[0]) / 1.e6)
def __rodCtrl(self):
diff = self.S[4] - self.rodSetPoint
fnDiff = np.tanh(1.0 * abs(diff)) # Relax control rod into position
if diff < 0.:
self.hrate = 0.5 * fnDiff
elif diff > 0.:
self.hrate = -0.5 * fnDiff
else:
self.hrate = 0.
def SCRAM(self, scramToggle=True):
"""
You crashed the reactor.
"""
self.scramToggle = scramToggle
def test():
"""
Test reactor in rod control and power control modes.
"""
i = 0
t0 = time.time()
legoReactor = LegoReactor()
legoReactor.setRodPosition(50.) # set rod position to 50% withdrawn
while i < 10000:
legoReactor.timeStep()
print("===================================")
print("Time [s] = %f" % legoReactor.t[-1])
print("Rod percent Withdrawn = %f" % legoReactor.S[4])
print("Reactor Power [MW] = %f " % float(qFuel(legoReactor.S[0]) / 1.e6))
print("Tfuel [K] = %f , Tcoolant [K] = %f" % (legoReactor.S[2], legoReactor.S[3]))
i += 1
i = 0
legoReactor.togglePwrCtrl(200.) # set reactor power to 200 MW
while i < 10000:
legoReactor.timeStep()
print("===================================")
print("Time [s] = %f" % legoReactor.t[-1])
print("Rod percent Withdrawn = %f" % legoReactor.S[4])
print("Reactor Power [MW] = %f " % float(qFuel(legoReactor.S[0]) / 1.e6))
print("Tfuel [K] = %f , Tcoolant [K] = %f" % (legoReactor.S[2], legoReactor.S[3]))
i += 1
t1 = time.time()
print(t1 - t0)
if __name__ == "__main__":
test()
|
the-stack_106_24963 | # Scrape American TV series from Douban; the data is written to a CSV file
import csv
import time
import jsonpath
import requests
class Douban:
def __init__(self):
self.url = 'https://movie.douban.com/j/search_subjects?' \
'type=tv&tag=%E7%BE%8E%E5%89%A7&sort=recommend&page_limit=20&page_start={}'
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/64.0.3282.204 Safari/537.36"
}
self.num = 0
self.writer = None
def run(self):
# 准备录入文件
f = open('./Douban-AmericanDrama.csv', 'w')
self.writer = csv.writer(f)
headers = ['title', 'rate', 'url', 'cover']
self.writer = csv.DictWriter(f, headers)
while True:
# Send the request and get the response
response = requests.get(self.url.format(self.num))
# Parse the JSON string; the response data can be parsed directly with json()
json_dict = response.json() # type:dict
# Get the data
subjects = json_dict.get('subjects')
# Process the data
if not subjects:
f.close()
return
map_res = map(self.write_in_csv, subjects)
print(list(map_res)[0])
self.num += 20
time.sleep(0.5)
def write_in_csv(self, subject):
# Parse the fields with jsonpath
title = jsonpath.jsonpath(subject, "$.title")[0]
rate = jsonpath.jsonpath(subject, "$.rate")[0]
url = jsonpath.jsonpath(subject, "$.url")[0]
cover = jsonpath.jsonpath(subject, "$.cover")[0]
# Assemble the row
row = [title, rate, url, cover]
# Write the row to the CSV
self.writer.writerow(row)
return 'Finished writing page %d' % (self.num / 20)
if __name__ == '__main__':
douban = Douban()
douban.run()
|
the-stack_106_24965 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"The Qiskit Terra setup file."
import os
import sys
from setuptools import setup, find_packages, Extension
try:
from Cython.Build import cythonize
except ImportError:
import subprocess
subprocess.call([sys.executable, '-m', 'pip', 'install', 'Cython>=0.27.1'])
from Cython.Build import cythonize
REQUIREMENTS = [
"jsonschema>=2.6",
"marshmallow>=3,<4",
"marshmallow_polyfield>=5.7,<6",
"networkx>=2.2;python_version>'3.5'",
# Networkx 2.4 is the final version with python 3.5 support.
"networkx>=2.2,<2.4;python_version=='3.5'",
"retworkx>=0.3.2",
"numpy>=1.17",
"ply>=3.10",
"psutil>=5",
"scipy>=1.4",
"sympy>=1.3",
"dill>=0.3",
"fastjsonschema>=2.10",
"python-constraint>=1.4",
"python-dateutil>=2.8.0",
]
# Add Cython extensions here
CYTHON_EXTS = ['utils', 'swap_trial']
CYTHON_MODULE = 'qiskit.transpiler.passes.routing.cython.stochastic_swap'
CYTHON_SOURCE_DIR = 'qiskit/transpiler/passes/routing/cython/stochastic_swap'
INCLUDE_DIRS = []
# Extra link args
LINK_FLAGS = []
# If on Win and not in MSYS2 (i.e. Visual studio compile)
if (sys.platform == 'win32' and os.environ.get('MSYSTEM') is None):
COMPILER_FLAGS = ['/O2']
# Everything else
else:
COMPILER_FLAGS = ['-O2', '-funroll-loops', '-std=c++11']
if sys.platform == 'darwin':
# These are needed for compiling on OSX 10.14+
COMPILER_FLAGS.append('-mmacosx-version-min=10.9')
LINK_FLAGS.append('-mmacosx-version-min=10.9')
EXT_MODULES = []
# Add Cython Extensions
for ext in CYTHON_EXTS:
mod = Extension(CYTHON_MODULE + '.' + ext,
sources=[CYTHON_SOURCE_DIR + '/' + ext + '.pyx'],
include_dirs=INCLUDE_DIRS,
extra_compile_args=COMPILER_FLAGS,
extra_link_args=LINK_FLAGS,
language='c++')
EXT_MODULES.append(mod)
# Read long description from README.
README_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'README.md')
with open(README_PATH) as readme_file:
README = readme_file.read()
setup(
name="qiskit-terra",
version="0.14.0",
description="Software for developing quantum computing programs",
long_description=README,
long_description_content_type='text/markdown',
url="https://github.com/Qiskit/qiskit-terra",
author="Qiskit Development Team",
author_email="[email protected]",
license="Apache 2.0",
classifiers=[
"Environment :: Console",
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
],
keywords="qiskit sdk quantum",
packages=find_packages(exclude=['test*']),
install_requires=REQUIREMENTS,
setup_requires=['Cython>=0.27.1'],
include_package_data=True,
python_requires=">=3.5",
extras_require={
'visualization': ['matplotlib>=2.1', 'ipywidgets>=7.3.0',
'pydot', "pillow>=4.2.1", "pylatexenc>=1.4",
"seaborn>=0.9.0", "pygments>=2.4"],
'full-featured-simulators': ['qiskit-aer>=0.1'],
'crosstalk-pass': ['z3-solver>=4.7'],
},
project_urls={
"Bug Tracker": "https://github.com/Qiskit/qiskit-terra/issues",
"Documentation": "https://qiskit.org/documentation/",
"Source Code": "https://github.com/Qiskit/qiskit-terra",
},
ext_modules=cythonize(EXT_MODULES),
zip_safe=False
)
|
the-stack_106_24966 | """
This module implements some standard regression models: OLS and WLS
models, as well as an AR(p) regression model.
Models are specified with a design matrix and are fit using their
'fit' method.
Subclasses that have more complicated covariance matrices
should override the 'whiten' method, as the fit method
prewhitens the response by calling 'whiten'.
General reference for regression models:
'Introduction to Linear Regression Analysis', Douglas C. Montgomery,
Elizabeth A. Peck, G. Geoffrey Vining. Wiley, 2006.
"""
__docformat__ = 'restructuredtext en'
import warnings
import numpy as np
from nibabel.onetime import auto_attr
from numpy.linalg import matrix_rank
import scipy.linalg as spl
from nilearn._utils.helpers import rename_parameters
from nilearn.glm.model import LikelihoodModelResults
from nilearn._utils.glm import positive_reciprocal
class OLSModel(object):
""" A simple ordinary least squares model.
Parameters
----------
design : array-like
This is your design matrix. Data are assumed to be column ordered
with observations in rows.
Methods
-------
model.__init___(design)
model.logL(b=self.beta, Y)
Attributes
----------
design : ndarray
This is the design, or X, matrix.
whitened_design : ndarray
This is the whitened design matrix.
`design` == `whitened_design` by default for the OLSModel,
though models that inherit from the OLSModel will whiten the design.
calc_beta : ndarray
This is the Moore-Penrose pseudoinverse of the whitened design matrix.
normalized_cov_beta : ndarray
``np.dot(calc_beta, calc_beta.T)``
df_residuals : scalar
Degrees of freedom of the residuals. Number of observations less the
rank of the design.
df_model : scalar
Degrees of freedom of the model. The rank of the design.
Note
----
This class is experimental.
It may change in any future release of Nilearn.
"""
def __init__(self, design):
"""
Parameters
----------
design : array-like
This is your design matrix.
Data are assumed to be column ordered with
observations in rows.
"""
super(OLSModel, self).__init__()
self.initialize(design)
def initialize(self, design):
# PLEASE don't assume we have a constant...
# TODO: handle case for noconstant regression
self.design = design
self.whitened_design = self.whiten(self.design)
self.calc_beta = spl.pinv(self.whitened_design)
self.normalized_cov_beta = np.dot(self.calc_beta,
np.transpose(self.calc_beta))
self.df_total = self.whitened_design.shape[0]
        eps = np.abs(self.design).sum() * np.finfo(np.float64).eps
self.df_model = matrix_rank(self.design, eps)
self.df_residuals = self.df_total - self.df_model
@auto_attr
def df_resid(self):
warnings.warn("'df_resid' from OLSModel"
"has been deprecated and will be removed. "
"Please use 'df_residuals'.",
FutureWarning)
return self.df_residuals
@auto_attr
def wdesign(self):
warnings.warn("'wdesign' from OLSModel"
"has been deprecated and will be removed. "
"Please use 'whitened_design'.",
FutureWarning)
return self.whitened_design
def logL(self, beta, Y, nuisance=None):
r''' Returns the value of the loglikelihood function at beta.
Given the whitened design matrix, the loglikelihood is evaluated
at the parameter vector, beta, for the dependent variable, Y
and the nuisance parameter, sigma.
Parameters
----------
beta : ndarray
The parameter estimates. Must be of length df_model.
Y : ndarray
The dependent variable
nuisance : dict, optional
A dict with key 'sigma', which is an optional estimate of sigma.
If None, defaults to its maximum likelihood estimate
(with beta fixed) as
``sum((Y - X*beta)**2) / n``, where n=Y.shape[0], X=self.design.
Returns
-------
loglf : float
The value of the loglikelihood function.
Notes
-----
The log-Likelihood Function is defined as
.. math::
\ell(\beta,\sigma,Y)=
-\frac{n}{2}\log(2\pi\sigma^2) - \|Y-X\beta\|^2/(2\sigma^2)
The parameter :math:`\sigma` above is what is sometimes referred to
as a nuisance parameter. That is, the likelihood is considered as a
function of :math:`\beta`, but to evaluate it, a value of
:math:`\sigma` is needed.
If :math:`\sigma` is not provided,
then its maximum likelihood estimate:
.. math::
\hat{\sigma}(\beta) = \frac{\text{SSE}(\beta)}{n}
is plugged in. This likelihood is now a function of only :math:`\beta`
and is technically referred to as a profile-likelihood.
References
----------
* W. Green. "Econometric Analysis," 5th ed., Pearson, 2003.
'''
# This is overwriting an abstract method of LikelihoodModel
X = self.whitened_design
wY = self.whiten(Y)
r = wY - np.dot(X, beta)
n = self.df_total
SSE = (r ** 2).sum(0)
if nuisance is None:
sigmasq = SSE / n
else:
sigmasq = nuisance['sigma']
loglf = - n / 2. * np.log(2 * np.pi * sigmasq) - SSE / (2 * sigmasq)
return loglf
def whiten(self, X):
""" Whiten design matrix
Parameters
----------
X : array
design matrix
Returns
-------
whitened_X : array
This matrix is the matrix whose pseudoinverse is ultimately
            used in estimating the coefficients. For OLSModel, it
            does nothing. For WLSModel and ARModel, it pre-applies
a square root of the covariance matrix to X.
"""
return X
def fit(self, Y):
""" Fit model to data `Y`
Full fit of the model including estimate of covariance matrix,
(whitened) residuals and scale.
Parameters
----------
Y : array-like
The dependent variable for the Least Squares problem.
Returns
-------
fit : RegressionResults
"""
# Other estimates of the covariance matrix for a heteroscedastic
# regression model can be implemented in WLSmodel. (Weighted least
# squares models assume covariance is diagonal, i.e. heteroscedastic).
wY = self.whiten(Y)
beta = np.dot(self.calc_beta, wY)
wresid = wY - np.dot(self.whitened_design, beta)
dispersion = np.sum(wresid ** 2, 0) / (
self.whitened_design.shape[0] - self.whitened_design.shape[1]
)
lfit = RegressionResults(beta, Y, self,
wY, wresid, dispersion=dispersion,
cov=self.normalized_cov_beta)
return lfit
class ARModel(OLSModel):
""" A regression model with an AR(p) covariance structure.
In terms of a LikelihoodModel, the parameters
are beta, the usual regression parameters,
and sigma, a scalar nuisance parameter that
shows up as multiplier in front of the AR(p) covariance.
Note
----
This class is experimental.
It may change in any future release of Nilearn.
"""
def __init__(self, design, rho):
""" Initialize AR model instance
Parameters
----------
design : ndarray
2D array with design matrix
rho : int or array-like
If int, gives order of model, and initializes rho to zeros. If
ndarray, gives initial estimate of rho. Be careful as ``ARModel(X,
1) != ARModel(X, 1.0)``.
"""
if isinstance(rho, int):
self.order = rho
self.rho = np.zeros(self.order, np.float64)
else:
self.rho = np.squeeze(np.asarray(rho))
if len(self.rho.shape) not in [0, 1]:
raise ValueError("AR parameters must be a scalar or a vector")
if self.rho.shape == ():
self.rho.shape = (1,)
self.order = self.rho.shape[0]
super(ARModel, self).__init__(design)
def whiten(self, X):
""" Whiten a series of columns according to AR(p) covariance structure
Parameters
----------
X : array-like of shape (n_features)
array to whiten
Returns
-------
whitened_X : ndarray
X whitened with order self.order AR
"""
X = np.asarray(X, np.float64)
whitened_X = X.copy()
for i in range(self.order):
whitened_X[(i + 1):] = (whitened_X[(i + 1):]
- self.rho[i]
* X[0: - (i + 1)]
)
return whitened_X
class RegressionResults(LikelihoodModelResults):
"""
This class summarizes the fit of a linear regression model.
It handles the output of contrasts, estimates of covariance, etc.
Note
----
This class is experimental.
It may change in any future release of Nilearn.
"""
@rename_parameters(
{'wresid': 'whitened_residuals', 'wY': 'whitened_Y'},
lib_name='Nistats'
)
def __init__(self, theta, Y, model, whitened_Y, whitened_residuals,
cov=None, dispersion=1., nuisance=None):
"""See LikelihoodModelResults constructor.
The only difference is that the whitened Y and residual values
are stored for a regression model.
"""
LikelihoodModelResults.__init__(self, theta, Y, model, cov,
dispersion, nuisance)
self.whitened_Y = whitened_Y
self.whitened_residuals = whitened_residuals
self.whitened_design = model.whitened_design
@auto_attr
def wdesign(self):
warnings.warn("'wdesign' from RegressionResults"
"has been deprecated and will be removed. "
"Please use 'whitened_design'.",
FutureWarning)
return self.whitened_design
@auto_attr
def wY(self):
warnings.warn("'wY' from RegressionResults "
"has been deprecated and will be removed. "
"Please use 'whitened_Y' instead.",
FutureWarning,
)
return self.whitened_Y
@auto_attr
def wresid(self):
warnings.warn("'wresid' from RegressionResults "
"has been deprecated and will be removed. "
"Please use 'whitened_residuals' instead.",
FutureWarning,
)
return self.whitened_residuals
@auto_attr
def resid(self):
warnings.warn("'resid' from RegressionResults "
"has been deprecated and will be removed. "
"Please use 'residuals' instead.",
FutureWarning,
)
return self.residuals
@auto_attr
def residuals(self):
"""
Residuals from the fit.
"""
return self.Y - self.predicted
@auto_attr
def norm_resid(self):
warnings.warn("'norm_resid' from RegressionResults "
"has been deprecated and will be removed. "
"Please use 'normalized_residuals' instead.",
FutureWarning,
)
return self.normalized_residuals
@auto_attr
def normalized_residuals(self):
"""
Residuals, normalized to have unit length.
Notes
-----
Is this supposed to return "stanardized residuals,"
residuals standardized
to have mean zero and approximately unit variance?
d_i = e_i / sqrt(MS_E)
Where MS_E = SSE / (n - k)
See: Montgomery and Peck 3.2.1 p. 68
Davidson and MacKinnon 15.2 p 662
"""
return self.residuals * positive_reciprocal(np.sqrt(self.dispersion))
@auto_attr
def predicted(self):
""" Return linear predictor values from a design matrix.
"""
beta = self.theta
# the LikelihoodModelResults has parameters named 'theta'
X = self.whitened_design
return np.dot(X, beta)
@auto_attr
def SSE(self):
"""Error sum of squares. If not from an OLS model this is "pseudo"-SSE.
"""
return (self.whitened_residuals ** 2).sum(0)
@auto_attr
def r_square(self):
"""Proportion of explained variance.
If not from an OLS model this is "pseudo"-R2.
"""
return np.var(self.predicted, 0) / np.var(self.whitened_Y, 0)
@auto_attr
def MSE(self):
""" Mean square (error) """
return self.SSE / self.df_residuals
class SimpleRegressionResults(LikelihoodModelResults):
"""This class contains only information of the model fit necessary
for contast computation.
Its intended to save memory when details of the model are unnecessary.
Note
----
This class is experimental.
It may change in any future release of Nilearn.
"""
def __init__(self, results):
"""See LikelihoodModelResults constructor.
The only difference is that the whitened Y and residual values
are stored for a regression model.
"""
self.theta = results.theta
self.cov = results.cov
self.dispersion = results.dispersion
self.nuisance = results.nuisance
self.df_total = results.Y.shape[0]
self.df_model = results.model.df_model
# put this as a parameter of LikelihoodModel
self.df_residuals = self.df_total - self.df_model
def logL(self, Y):
"""
The maximized log-likelihood
"""
raise ValueError('can not use this method for simple results')
def resid(self, Y):
warnings.warn("'resid()' from SimpleRegressionResults"
" has been deprecated and will be removed. "
"Please use 'residuals()'.",
FutureWarning,
)
return self.residuals(Y)
def residuals(self, Y):
"""
Residuals from the fit.
"""
return Y - self.predicted
@auto_attr
def df_resid(self):
warnings.warn("The attribute 'df_resid' from OLSModel"
"has been deprecated and will be removed. "
"Please use 'df_residuals'.",
FutureWarning)
return self.df_residuals
def norm_resid(self, Y):
warnings.warn("'SimpleRegressionResults.norm_resid' method "
"has been deprecated and will be removed. "
"Please use 'normalized_residuals'.",
FutureWarning,
)
return self.normalized_residuals(Y)
def normalized_residuals(self, Y):
"""
Residuals, normalized to have unit length.
Notes
-----
Is this supposed to return "stanardized residuals,"
residuals standardized
to have mean zero and approximately unit variance?
d_i = e_i / sqrt(MS_E)
Where MS_E = SSE / (n - k)
See: Montgomery and Peck 3.2.1 p. 68
Davidson and MacKinnon 15.2 p 662
"""
return (self.residuals(Y)
* positive_reciprocal(np.sqrt(self.dispersion))
)
def predicted(self):
""" Return linear predictor values from a design matrix.
"""
beta = self.theta
# the LikelihoodModelResults has parameters named 'theta'
X = self.model.design
return np.dot(X, beta)
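if __name__ == "__main__":
    # Usage sketch (not part of the original module): fit the models defined
    # above on synthetic data. Shapes follow the conventions documented in
    # OLSModel: `design` is (n_observations, n_regressors), `Y` is
    # (n_observations,). The coefficients and noise level are arbitrary.
    rng = np.random.RandomState(0)
    n = 100
    design = np.column_stack([np.ones(n), rng.standard_normal(n)])
    Y = design.dot(np.array([1.0, 2.0])) + rng.standard_normal(n)

    ols = OLSModel(design)
    results = ols.fit(Y)
    print("OLS beta:", results.theta)
    print("R^2:", results.r_square)
    print("log-likelihood:", ols.logL(results.theta, Y))

    # AR(1) variant: whitening is applied to both the design and the data.
    ar_results = ARModel(design, rho=0.3).fit(Y)
    print("AR(1) beta:", ar_results.theta)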
|
the-stack_106_24967 | """
Utility RPython functions to inspect objects in the GC.
"""
from pypy.rpython.lltypesystem import lltype, llmemory, rffi
from pypy.rlib.objectmodel import free_non_gc_object
from pypy.rpython.module.ll_os import underscore_on_windows
from pypy.rlib import rposix, rgc
from pypy.rpython.memory.support import AddressDict, get_address_stack
# ---------- implementation of pypy.rlib.rgc.get_rpy_roots() ----------
def _counting_rpy_root(obj, gc):
gc._count_rpy += 1
def _do_count_rpy_roots(gc):
gc._count_rpy = 0
gc.enumerate_all_roots(_counting_rpy_root, gc)
return gc._count_rpy
def _append_rpy_root(obj, gc):
# Can use the gc list, but should not allocate!
# It is essential that the list is not resizable!
lst = gc._list_rpy
index = gc._count_rpy
if index >= len(lst):
raise ValueError
gc._count_rpy = index + 1
lst[index] = llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
def _do_append_rpy_roots(gc, lst):
gc._count_rpy = 0
gc._list_rpy = lst
gc.enumerate_all_roots(_append_rpy_root, gc)
gc._list_rpy = None
def get_rpy_roots(gc):
count = _do_count_rpy_roots(gc)
extra = 16
while True:
result = [lltype.nullptr(llmemory.GCREF.TO)] * (count + extra)
try:
_do_append_rpy_roots(gc, result)
except ValueError:
extra *= 3
else:
return result
# ---------- implementation of pypy.rlib.rgc.get_rpy_referents() ----------
def _count_rpy_referent(pointer, gc):
gc._count_rpy += 1
def _do_count_rpy_referents(gc, gcref):
gc._count_rpy = 0
gc.trace(llmemory.cast_ptr_to_adr(gcref), _count_rpy_referent, gc)
return gc._count_rpy
def _append_rpy_referent(pointer, gc):
# Can use the gc list, but should not allocate!
# It is essential that the list is not resizable!
lst = gc._list_rpy
index = gc._count_rpy
if index >= len(lst):
raise ValueError
gc._count_rpy = index + 1
lst[index] = llmemory.cast_adr_to_ptr(pointer.address[0],
llmemory.GCREF)
def _do_append_rpy_referents(gc, gcref, lst):
gc._count_rpy = 0
gc._list_rpy = lst
gc.trace(llmemory.cast_ptr_to_adr(gcref), _append_rpy_referent, gc)
def get_rpy_referents(gc, gcref):
count = _do_count_rpy_referents(gc, gcref)
result = [lltype.nullptr(llmemory.GCREF.TO)] * count
_do_append_rpy_referents(gc, gcref, result)
return result
# ----------
def get_rpy_memory_usage(gc, gcref):
return gc.get_size_incl_hash(llmemory.cast_ptr_to_adr(gcref))
def get_rpy_type_index(gc, gcref):
typeid = gc.get_type_id(llmemory.cast_ptr_to_adr(gcref))
return gc.get_member_index(typeid)
def is_rpy_instance(gc, gcref):
typeid = gc.get_type_id(llmemory.cast_ptr_to_adr(gcref))
return gc.is_rpython_class(typeid)
# ----------
raw_os_write = rffi.llexternal(underscore_on_windows+'write',
[rffi.INT, llmemory.Address, rffi.SIZE_T],
rffi.SIZE_T,
sandboxsafe=True, _nowrapper=True)
AddressStack = get_address_stack()
class HeapDumper(object):
_alloc_flavor_ = "raw"
BUFSIZE = 8192 # words
def __init__(self, gc, fd):
self.gc = gc
self.gcflag = gc.gcflag_extra
self.fd = rffi.cast(rffi.INT, fd)
self.writebuffer = lltype.malloc(rffi.SIGNEDP.TO, self.BUFSIZE,
flavor='raw')
self.buf_count = 0
if self.gcflag == 0:
self.seen = AddressDict()
self.pending = AddressStack()
def delete(self):
if self.gcflag == 0:
self.seen.delete()
self.pending.delete()
lltype.free(self.writebuffer, flavor='raw')
free_non_gc_object(self)
def flush(self):
if self.buf_count > 0:
bytes = self.buf_count * rffi.sizeof(rffi.LONG)
count = raw_os_write(self.fd,
rffi.cast(llmemory.Address, self.writebuffer),
rffi.cast(rffi.SIZE_T, bytes))
if rffi.cast(lltype.Signed, count) != bytes:
raise OSError(rposix.get_errno(), "raw_os_write failed")
self.buf_count = 0
flush._dont_inline_ = True
def write(self, value):
x = self.buf_count
self.writebuffer[x] = value
x += 1
self.buf_count = x
if x == self.BUFSIZE:
self.flush()
write._always_inline_ = True
# ----------
def write_marker(self):
self.write(0)
self.write(0)
self.write(0)
self.write(-1)
def writeobj(self, obj):
gc = self.gc
typeid = gc.get_type_id(obj)
self.write(llmemory.cast_adr_to_int(obj))
self.write(gc.get_member_index(typeid))
self.write(gc.get_size_incl_hash(obj))
gc.trace(obj, self._writeref, None)
self.write(-1)
def _writeref(self, pointer, _):
obj = pointer.address[0]
self.write(llmemory.cast_adr_to_int(obj))
self.add(obj)
def add(self, obj):
if self.gcflag == 0:
if not self.seen.contains(obj):
self.seen.setitem(obj, obj)
self.pending.append(obj)
else:
hdr = self.gc.header(obj)
if (hdr.tid & self.gcflag) == 0:
hdr.tid |= self.gcflag
self.pending.append(obj)
def add_roots(self):
self.gc.enumerate_all_roots(_hd_add_root, self)
pendingroots = self.pending
self.pending = AddressStack()
self.walk(pendingroots)
pendingroots.delete()
self.write_marker()
def walk(self, pending):
while pending.non_empty():
self.writeobj(pending.pop())
# ----------
    # A simplified copy of the above, to make sure we walk all the
    # objects again to clear the 'gcflag'.
def unwriteobj(self, obj):
gc = self.gc
gc.trace(obj, self._unwriteref, None)
def _unwriteref(self, pointer, _):
obj = pointer.address[0]
self.unadd(obj)
def unadd(self, obj):
assert self.gcflag != 0
hdr = self.gc.header(obj)
if (hdr.tid & self.gcflag) != 0:
hdr.tid &= ~self.gcflag
self.pending.append(obj)
def clear_gcflag_again(self):
self.gc.enumerate_all_roots(_hd_unadd_root, self)
pendingroots = self.pending
self.pending = AddressStack()
self.unwalk(pendingroots)
pendingroots.delete()
def unwalk(self, pending):
while pending.non_empty():
self.unwriteobj(pending.pop())
def _hd_add_root(obj, heap_dumper):
heap_dumper.add(obj)
def _hd_unadd_root(obj, heap_dumper):
heap_dumper.unadd(obj)
def dump_rpy_heap(gc, fd):
heapdumper = HeapDumper(gc, fd)
heapdumper.add_roots()
heapdumper.walk(heapdumper.pending)
heapdumper.flush()
if heapdumper.gcflag != 0:
heapdumper.clear_gcflag_again()
heapdumper.unwalk(heapdumper.pending)
heapdumper.delete()
return True
def get_typeids_z(gc):
srcaddress = gc.root_walker.gcdata.typeids_z
return llmemory.cast_adr_to_ptr(srcaddress, lltype.Ptr(rgc.ARRAY_OF_CHAR))
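# --- Illustration only (not part of the original RPython module) -------------
# A minimal CPython-side sketch of reading the dump written by HeapDumper:
# every value is one native C long (the write buffer holds signed machine
# words sent verbatim by raw_os_write), each object is emitted as
#     [address, type_index, size, referenced_address..., -1]
# and write_marker() adds the pseudo-record [0, 0, 0, -1] after the roots.
# Assumes the dump is parsed on the machine that produced it (same word size
# and endianness).
def read_rpy_heap_dump(path):
    import array
    words = array.array('l')          # native signed C long
    with open(path, 'rb') as f:
        words.frombytes(f.read())
    records, current = [], []
    for value in words:
        if value == -1:               # terminates the current record
            records.append(current)
            current = []
        else:
            current.append(value)
    # A record equal to [0, 0, 0] is the marker separating the root batch
    # from the rest of the heap.
    return records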
|
the-stack_106_24969 | """
test.test_component_light
~~~~~~~~~~~~~~~~~~~~~~~~~
Tests light component.
"""
# pylint: disable=too-many-public-methods,protected-access
import unittest
import os
import homeassistant as ha
import homeassistant.loader as loader
import homeassistant.util as util
import homeassistant.components as components
import homeassistant.components.light as light
import mock_toggledevice_platform
from helper import mock_service, get_test_home_assistant
class TestLight(unittest.TestCase):
""" Test the switch module. """
def setUp(self): # pylint: disable=invalid-name
self.hass = get_test_home_assistant()
loader.prepare(self.hass)
loader.set_component('light.test', mock_toggledevice_platform)
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass._pool.stop()
user_light_file = self.hass.get_config_path(light.LIGHT_PROFILES_FILE)
if os.path.isfile(user_light_file):
os.remove(user_light_file)
def test_methods(self):
""" Test if methods call the services as expected. """
# Test is_on
self.hass.states.set('light.test', components.STATE_ON)
self.assertTrue(light.is_on(self.hass, 'light.test'))
self.hass.states.set('light.test', components.STATE_OFF)
self.assertFalse(light.is_on(self.hass, 'light.test'))
self.hass.states.set(light.ENTITY_ID_ALL_LIGHTS, components.STATE_ON)
self.assertTrue(light.is_on(self.hass))
self.hass.states.set(light.ENTITY_ID_ALL_LIGHTS, components.STATE_OFF)
self.assertFalse(light.is_on(self.hass))
# Test turn_on
turn_on_calls = mock_service(
self.hass, light.DOMAIN, components.SERVICE_TURN_ON)
light.turn_on(
self.hass,
entity_id='entity_id_val',
transition='transition_val',
brightness='brightness_val',
rgb_color='rgb_color_val',
xy_color='xy_color_val',
profile='profile_val')
self.hass._pool.block_till_done()
self.assertEqual(1, len(turn_on_calls))
call = turn_on_calls[-1]
self.assertEqual(light.DOMAIN, call.domain)
self.assertEqual(components.SERVICE_TURN_ON, call.service)
self.assertEqual('entity_id_val', call.data[components.ATTR_ENTITY_ID])
self.assertEqual('transition_val', call.data[light.ATTR_TRANSITION])
self.assertEqual('brightness_val', call.data[light.ATTR_BRIGHTNESS])
self.assertEqual('rgb_color_val', call.data[light.ATTR_RGB_COLOR])
self.assertEqual('xy_color_val', call.data[light.ATTR_XY_COLOR])
self.assertEqual('profile_val', call.data[light.ATTR_PROFILE])
# Test turn_off
turn_off_calls = mock_service(
self.hass, light.DOMAIN, components.SERVICE_TURN_OFF)
light.turn_off(
self.hass, entity_id='entity_id_val', transition='transition_val')
self.hass._pool.block_till_done()
self.assertEqual(1, len(turn_off_calls))
call = turn_off_calls[-1]
self.assertEqual(light.DOMAIN, call.domain)
self.assertEqual(components.SERVICE_TURN_OFF, call.service)
self.assertEqual('entity_id_val', call.data[components.ATTR_ENTITY_ID])
self.assertEqual('transition_val', call.data[light.ATTR_TRANSITION])
def test_services(self):
""" Test the provided services. """
mock_toggledevice_platform.init()
self.assertTrue(
light.setup(self.hass, {light.DOMAIN: {ha.CONF_TYPE: 'test'}}))
dev1, dev2, dev3 = mock_toggledevice_platform.get_lights(None, None)
# Test init
self.assertTrue(light.is_on(self.hass, dev1.entity_id))
self.assertFalse(light.is_on(self.hass, dev2.entity_id))
self.assertFalse(light.is_on(self.hass, dev3.entity_id))
# Test basic turn_on, turn_off services
light.turn_off(self.hass, entity_id=dev1.entity_id)
light.turn_on(self.hass, entity_id=dev2.entity_id)
self.hass._pool.block_till_done()
self.assertFalse(light.is_on(self.hass, dev1.entity_id))
self.assertTrue(light.is_on(self.hass, dev2.entity_id))
# turn on all lights
light.turn_on(self.hass)
self.hass._pool.block_till_done()
self.assertTrue(light.is_on(self.hass, dev1.entity_id))
self.assertTrue(light.is_on(self.hass, dev2.entity_id))
self.assertTrue(light.is_on(self.hass, dev3.entity_id))
# turn off all lights
light.turn_off(self.hass)
self.hass._pool.block_till_done()
self.assertFalse(light.is_on(self.hass, dev1.entity_id))
self.assertFalse(light.is_on(self.hass, dev2.entity_id))
self.assertFalse(light.is_on(self.hass, dev3.entity_id))
# Ensure all attributes process correctly
light.turn_on(self.hass, dev1.entity_id,
transition=10, brightness=20)
light.turn_on(
self.hass, dev2.entity_id, rgb_color=[255, 255, 255])
light.turn_on(self.hass, dev3.entity_id, xy_color=[.4, .6])
self.hass._pool.block_till_done()
method, data = dev1.last_call('turn_on')
self.assertEqual(
{light.ATTR_TRANSITION: 10,
light.ATTR_BRIGHTNESS: 20},
data)
method, data = dev2.last_call('turn_on')
self.assertEqual(
{light.ATTR_XY_COLOR: util.color_RGB_to_xy(255, 255, 255)},
data)
method, data = dev3.last_call('turn_on')
self.assertEqual({light.ATTR_XY_COLOR: [.4, .6]}, data)
# One of the light profiles
prof_name, prof_x, prof_y, prof_bri = 'relax', 0.5119, 0.4147, 144
# Test light profiles
light.turn_on(self.hass, dev1.entity_id, profile=prof_name)
# Specify a profile and attributes to overwrite it
light.turn_on(
self.hass, dev2.entity_id,
profile=prof_name, brightness=100, xy_color=[.4, .6])
self.hass._pool.block_till_done()
method, data = dev1.last_call('turn_on')
self.assertEqual(
{light.ATTR_BRIGHTNESS: prof_bri,
light.ATTR_XY_COLOR: [prof_x, prof_y]},
data)
method, data = dev2.last_call('turn_on')
self.assertEqual(
{light.ATTR_BRIGHTNESS: 100,
light.ATTR_XY_COLOR: [.4, .6]},
data)
# Test shitty data
light.turn_on(self.hass, dev1.entity_id, profile="nonexisting")
light.turn_on(self.hass, dev2.entity_id, xy_color=["bla-di-bla", 5])
light.turn_on(self.hass, dev3.entity_id, rgb_color=[255, None, 2])
self.hass._pool.block_till_done()
method, data = dev1.last_call('turn_on')
self.assertEqual({}, data)
method, data = dev2.last_call('turn_on')
self.assertEqual({}, data)
method, data = dev3.last_call('turn_on')
self.assertEqual({}, data)
# faulty attributes should not overwrite profile data
light.turn_on(
self.hass, dev1.entity_id,
profile=prof_name, brightness='bright', rgb_color='yellowish')
self.hass._pool.block_till_done()
method, data = dev1.last_call('turn_on')
self.assertEqual(
{light.ATTR_BRIGHTNESS: prof_bri,
light.ATTR_XY_COLOR: [prof_x, prof_y]},
data)
def test_setup(self):
""" Test the setup method. """
# Bogus config
self.assertFalse(light.setup(self.hass, {}))
self.assertFalse(light.setup(self.hass, {light.DOMAIN: {}}))
# Test with non-existing component
self.assertFalse(light.setup(
self.hass, {light.DOMAIN: {ha.CONF_TYPE: 'nonexisting'}}
))
        # Test if light component returns 0 lights
mock_toggledevice_platform.init(True)
self.assertEqual(
[], mock_toggledevice_platform.get_lights(None, None))
self.assertFalse(light.setup(
self.hass, {light.DOMAIN: {ha.CONF_TYPE: 'test'}}
))
def test_light_profiles(self):
""" Test light profiles. """
mock_toggledevice_platform.init()
user_light_file = self.hass.get_config_path(light.LIGHT_PROFILES_FILE)
# Setup a wrong light file
with open(user_light_file, 'w') as user_file:
user_file.write('id,x,y,brightness\n')
user_file.write('I,WILL,NOT,WORK\n')
self.assertFalse(light.setup(
self.hass, {light.DOMAIN: {ha.CONF_TYPE: 'test'}}
))
# Clean up broken file
os.remove(user_light_file)
with open(user_light_file, 'w') as user_file:
user_file.write('id,x,y,brightness\n')
user_file.write('test,.4,.6,100\n')
self.assertTrue(light.setup(
self.hass, {light.DOMAIN: {ha.CONF_TYPE: 'test'}}
))
dev1, dev2, dev3 = mock_toggledevice_platform.get_lights(None, None)
light.turn_on(self.hass, dev1.entity_id, profile='test')
self.hass._pool.block_till_done()
method, data = dev1.last_call('turn_on')
self.assertEqual(
{light.ATTR_XY_COLOR: [.4, .6], light.ATTR_BRIGHTNESS: 100},
data)
|
the-stack_106_24971 | """Matplotlib rankplot."""
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
from ....stats.density_utils import histogram
from ...plot_utils import _scale_fig_size, make_label
from . import backend_kwarg_defaults, backend_show, create_axes_grid
def plot_rank(
axes,
length_plotters,
rows,
cols,
figsize,
plotters,
bins,
kind,
colors,
ref_line,
labels,
backend_kwargs,
show,
):
"""Matplotlib rankplot.."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(),
**backend_kwargs,
}
figsize, ax_labelsize, titlesize, _, _, _ = _scale_fig_size(figsize, None, rows=rows, cols=cols)
backend_kwargs.setdefault("figsize", figsize)
backend_kwargs.setdefault("squeeze", True)
if axes is None:
_, axes = create_axes_grid(
length_plotters,
rows,
cols,
backend_kwargs=backend_kwargs,
)
for ax, (var_name, selection, var_data) in zip(np.ravel(axes), plotters):
ranks = scipy.stats.rankdata(var_data, method="average").reshape(var_data.shape)
bin_ary = np.histogram_bin_edges(ranks, bins=bins, range=(0, ranks.size))
all_counts = np.empty((len(ranks), len(bin_ary) - 1))
for idx, row in enumerate(ranks):
_, all_counts[idx], _ = histogram(row, bins=bin_ary)
gap = all_counts.max() * 1.05
width = bin_ary[1] - bin_ary[0]
# Center the bins
bin_ary = (bin_ary[1:] + bin_ary[:-1]) / 2
y_ticks = []
if kind == "bars":
for idx, counts in enumerate(all_counts):
y_ticks.append(idx * gap)
ax.bar(
bin_ary,
counts,
bottom=y_ticks[-1],
width=width,
align="center",
color=colors[idx],
edgecolor=ax.get_facecolor(),
)
if ref_line:
ax.axhline(y=y_ticks[-1] + counts.mean(), linestyle="--", color="k")
if labels:
ax.set_ylabel("Chain", fontsize=ax_labelsize)
elif kind == "vlines":
ymin = all_counts.mean()
for idx, counts in enumerate(all_counts):
ax.plot(bin_ary, counts, "o", color=colors[idx])
ax.vlines(bin_ary, ymin, counts, lw=2, colors=colors[idx])
ax.set_ylim(0, all_counts.mean() * 2)
if ref_line:
ax.axhline(y=all_counts.mean(), linestyle="--", color="k")
if labels:
ax.set_xlabel("Rank (all chains)", fontsize=ax_labelsize)
ax.set_yticks(y_ticks)
ax.set_yticklabels(np.arange(len(y_ticks)))
ax.set_title(make_label(var_name, selection), fontsize=titlesize)
else:
ax.set_yticks([])
if backend_show(show):
plt.show()
return axes
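if __name__ == "__main__":
    # Usage sketch (not part of the original backend module): this backend is
    # normally reached through ArviZ's public `az.plot_rank` wrapper rather
    # than called directly. Assumes the `arviz` package and its bundled
    # example data are available.
    import arviz as az

    idata = az.load_arviz_data("centered_eight")
    az.plot_rank(idata, var_names=["mu", "tau"], kind="bars", ref_line=True)
    plt.show()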
|
the-stack_106_24972 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import html
import json
import re
import unittest.mock
import urllib.parse
import pytest
from airflow import settings
from airflow.executors.celery_executor import CeleryExecutor
from airflow.models import DagBag, DagModel, TaskInstance
from airflow.models.dagcode import DagCode
from airflow.ti_deps.dependencies_states import QUEUEABLE_STATES, RUNNABLE_STATES
from airflow.utils import dates, timezone
from airflow.utils.log.logging_mixin import ExternalLoggingMixin
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from airflow.www.views import TaskInstanceModelView
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_runs
from tests.test_utils.www import check_content_in_response, check_content_not_in_response
DEFAULT_DATE = dates.days_ago(2)
DEFAULT_VAL = urllib.parse.quote_plus(str(DEFAULT_DATE))
@pytest.fixture(scope="module", autouse=True)
def reset_dagruns():
"""Clean up stray garbage from other tests."""
clear_db_runs()
@pytest.fixture(autouse=True)
def init_dagruns(app, reset_dagruns):
app.dag_bag.get_dag("example_bash_operator").create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING,
)
app.dag_bag.get_dag("example_subdag_operator").create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING,
)
app.dag_bag.get_dag("example_xcom").create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING,
)
yield
clear_db_runs()
@pytest.mark.parametrize(
"url, contents",
[
pytest.param(
"/",
[
"/delete?dag_id=example_bash_operator",
"return confirmDeleteDag(this, 'example_bash_operator')",
],
id="delete-dag-button-normal",
),
pytest.param(
f'task?task_id=runme_0&dag_id=example_bash_operator&execution_date={DEFAULT_VAL}',
['Task Instance Details'],
id="task",
),
pytest.param(
f'xcom?task_id=runme_0&dag_id=example_bash_operator&execution_date={DEFAULT_VAL}',
['XCom'],
id="xcom",
),
pytest.param('xcom/list', ['List XComs'], id="xcom-list"),
pytest.param(
f'rendered-templates?task_id=runme_0&dag_id=example_bash_operator&execution_date={DEFAULT_VAL}',
['Rendered Template'],
id="rendered-templates",
),
pytest.param(
'dag_details?dag_id=example_bash_operator',
['DAG Details'],
id="dag-details",
),
pytest.param(
'dag_details?dag_id=example_subdag_operator.section-1',
['DAG Details'],
id="dag-details-subdag",
),
pytest.param(
'graph?dag_id=example_bash_operator',
['runme_1'],
id='graph',
),
pytest.param(
'tree?dag_id=example_bash_operator',
['runme_1'],
id='tree',
),
pytest.param(
'tree?dag_id=example_subdag_operator.section-1',
['section-1-task-1'],
id="tree-subdag",
),
pytest.param(
'duration?days=30&dag_id=example_bash_operator',
['example_bash_operator'],
id='duration',
),
pytest.param(
'duration?days=30&dag_id=missing_dag',
['seems to be missing'],
id='duration-missing',
),
pytest.param(
'tries?days=30&dag_id=example_bash_operator',
['example_bash_operator'],
id='tries',
),
pytest.param(
'landing_times?days=30&dag_id=example_bash_operator',
['example_bash_operator'],
id='landing-times',
),
pytest.param(
'gantt?dag_id=example_bash_operator',
['example_bash_operator'],
id="gantt",
),
pytest.param(
"dag-dependencies",
["child_task1", "test_trigger_dagrun"],
id="dag-dependencies",
),
# Test that Graph, Tree, Calendar & Dag Details View uses the DagBag
# already created in views.py
pytest.param(
"graph?dag_id=example_bash_operator",
["example_bash_operator"],
id="existing-dagbag-graph",
),
pytest.param(
"tree?dag_id=example_bash_operator",
["example_bash_operator"],
id="existing-dagbag-tree",
),
pytest.param(
"calendar?dag_id=example_bash_operator",
["example_bash_operator"],
id="existing-dagbag-calendar",
),
pytest.param(
"dag_details?dag_id=example_bash_operator",
["example_bash_operator"],
id="existing-dagbag-dag-details",
),
pytest.param(
f'confirm?task_id=runme_0&dag_id=example_bash_operator&state=success'
f'&execution_date={DEFAULT_VAL}',
['Wait a minute.'],
id="confirm-success",
),
pytest.param(
f'confirm?task_id=runme_0&dag_id=example_bash_operator&state=failed&execution_date={DEFAULT_VAL}',
['Wait a minute.'],
id="confirm-failed",
),
pytest.param(
f'confirm?task_id=runme_0&dag_id=invalid_dag&state=failed&execution_date={DEFAULT_VAL}',
['DAG invalid_dag not found'],
id="confirm-failed",
),
pytest.param(
f'confirm?task_id=invalid_task&dag_id=example_bash_operator&state=failed'
f'&execution_date={DEFAULT_VAL}',
['Task invalid_task not found'],
id="confirm-failed",
),
pytest.param(
f'confirm?task_id=runme_0&dag_id=example_bash_operator&state=invalid'
f'&execution_date={DEFAULT_VAL}',
["Invalid state invalid, must be either 'success' or 'failed'"],
id="confirm-invalid",
),
],
)
def test_views_get(admin_client, url, contents):
resp = admin_client.get(url, follow_redirects=True)
for content in contents:
check_content_in_response(content, resp)
def test_rendered_k8s(admin_client):
url = f'rendered-k8s?task_id=runme_0&dag_id=example_bash_operator&execution_date={DEFAULT_VAL}'
with unittest.mock.patch.object(settings, "IS_K8S_OR_K8SCELERY_EXECUTOR", True):
resp = admin_client.get(url, follow_redirects=True)
check_content_in_response('K8s Pod Spec', resp)
@conf_vars({('core', 'executor'): 'LocalExecutor'})
def test_rendered_k8s_without_k8s(admin_client):
url = f'rendered-k8s?task_id=runme_0&dag_id=example_bash_operator&execution_date={DEFAULT_VAL}'
resp = admin_client.get(url, follow_redirects=True)
assert 404 == resp.status_code
@pytest.mark.parametrize(
"test_str, expected_text",
[
("hello\nworld", r'\"conf\":{\"abc\":\"hello\\nworld\"}'),
("hello'world", r'\"conf\":{\"abc\":\"hello\\u0027world\"}'),
("<script>", r'\"conf\":{\"abc\":\"\\u003cscript\\u003e\"}'),
("\"", r'\"conf\":{\"abc\":\"\\\"\"}'),
],
)
def test_escape_in_tree_view(app, admin_client, test_str, expected_text):
app.dag_bag.get_dag('test_tree_view').create_dagrun(
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
run_type=DagRunType.MANUAL,
state=State.RUNNING,
conf={"abc": test_str},
)
url = 'tree?dag_id=test_tree_view'
resp = admin_client.get(url, follow_redirects=True)
check_content_in_response(expected_text, resp)
def test_dag_details_trigger_origin_tree_view(app, admin_client):
app.dag_bag.get_dag('test_tree_view').create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING,
)
url = 'dag_details?dag_id=test_tree_view'
resp = admin_client.get(url, follow_redirects=True)
params = {'dag_id': 'test_tree_view', 'origin': '/tree?dag_id=test_tree_view'}
href = f"/trigger?{html.escape(urllib.parse.urlencode(params))}"
check_content_in_response(href, resp)
def test_dag_details_trigger_origin_graph_view(app, admin_client):
app.dag_bag.get_dag('test_graph_view').create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING,
)
url = 'dag_details?dag_id=test_graph_view'
resp = admin_client.get(url, follow_redirects=True)
params = {'dag_id': 'test_graph_view', 'origin': '/graph?dag_id=test_graph_view'}
href = f"/trigger?{html.escape(urllib.parse.urlencode(params))}"
check_content_in_response(href, resp)
def test_last_dagruns(admin_client):
resp = admin_client.post('last_dagruns', follow_redirects=True)
check_content_in_response('example_bash_operator', resp)
def test_last_dagruns_success_when_selecting_dags(admin_client):
resp = admin_client.post(
'last_dagruns', data={'dag_ids': ['example_subdag_operator']}, follow_redirects=True
)
assert resp.status_code == 200
stats = json.loads(resp.data.decode('utf-8'))
assert 'example_bash_operator' not in stats
assert 'example_subdag_operator' in stats
# Multiple
resp = admin_client.post(
'last_dagruns',
data={'dag_ids': ['example_subdag_operator', 'example_bash_operator']},
follow_redirects=True,
)
stats = json.loads(resp.data.decode('utf-8'))
assert 'example_bash_operator' in stats
assert 'example_subdag_operator' in stats
check_content_not_in_response('example_xcom', resp)
def test_code(admin_client):
url = 'code?dag_id=example_bash_operator'
resp = admin_client.get(url, follow_redirects=True)
check_content_not_in_response('Failed to load file', resp)
check_content_in_response('example_bash_operator', resp)
def test_code_no_file(admin_client):
url = 'code?dag_id=example_bash_operator'
mock_open_patch = unittest.mock.mock_open(read_data='')
mock_open_patch.side_effect = FileNotFoundError
with unittest.mock.patch('builtins.open', mock_open_patch), unittest.mock.patch(
"airflow.models.dagcode.STORE_DAG_CODE", False
):
resp = admin_client.get(url, follow_redirects=True)
check_content_in_response('Failed to load file', resp)
check_content_in_response('example_bash_operator', resp)
@conf_vars({("core", "store_dag_code"): "True"})
def test_code_from_db(admin_client):
dag = DagBag(include_examples=True).get_dag("example_bash_operator")
DagCode(dag.fileloc, DagCode._get_code_from_file(dag.fileloc)).sync_to_db()
url = 'code?dag_id=example_bash_operator'
resp = admin_client.get(url)
check_content_not_in_response('Failed to load file', resp)
check_content_in_response('example_bash_operator', resp)
@conf_vars({("core", "store_dag_code"): "True"})
def test_code_from_db_all_example_dags(admin_client):
dagbag = DagBag(include_examples=True)
for dag in dagbag.dags.values():
DagCode(dag.fileloc, DagCode._get_code_from_file(dag.fileloc)).sync_to_db()
url = 'code?dag_id=example_bash_operator'
resp = admin_client.get(url)
check_content_not_in_response('Failed to load file', resp)
check_content_in_response('example_bash_operator', resp)
@pytest.mark.parametrize(
"url, data, content",
[
('paused?dag_id=example_bash_operator&is_paused=false', None, 'OK'),
(
"failed",
dict(
task_id="run_this_last",
dag_id="example_bash_operator",
execution_date=DEFAULT_DATE,
upstream="false",
downstream="false",
future="false",
past="false",
origin="/graph?dag_id=example_bash_operator",
),
"Marked failed on 1 task instances",
),
(
"success",
dict(
task_id="run_this_last",
dag_id="example_bash_operator",
execution_date=DEFAULT_DATE,
upstream="false",
downstream="false",
future="false",
past="false",
origin="/graph?dag_id=example_bash_operator",
),
"Marked success on 1 task instances",
),
(
"clear",
dict(
task_id="runme_1",
dag_id="example_bash_operator",
execution_date=DEFAULT_DATE,
upstream="false",
downstream="false",
future="false",
past="false",
only_failed="false",
),
"example_bash_operator",
),
(
"run",
dict(
task_id="runme_0",
dag_id="example_bash_operator",
ignore_all_deps="false",
ignore_ti_state="true",
execution_date=DEFAULT_DATE,
),
"",
),
],
ids=[
"paused",
"failed-flash-hint",
"success-flash-hint",
"clear",
"run",
],
)
def test_views_post(admin_client, url, data, content):
resp = admin_client.post(url, data=data, follow_redirects=True)
check_content_in_response(content, resp)
@pytest.mark.parametrize("url", ["failed", "success"])
def test_dag_never_run(admin_client, url):
dag_id = "example_bash_operator"
form = dict(
task_id="run_this_last",
dag_id=dag_id,
execution_date=DEFAULT_DATE,
upstream="false",
downstream="false",
future="false",
past="false",
origin="/graph?dag_id=example_bash_operator",
)
clear_db_runs()
resp = admin_client.post(url, data=form, follow_redirects=True)
check_content_in_response(f"Cannot mark tasks as {url}, seem that dag {dag_id} has never run", resp)
class _ForceHeartbeatCeleryExecutor(CeleryExecutor):
def heartbeat(self):
return True
@pytest.mark.parametrize("state", RUNNABLE_STATES)
@unittest.mock.patch(
'airflow.executors.executor_loader.ExecutorLoader.get_default_executor',
return_value=_ForceHeartbeatCeleryExecutor(),
)
def test_run_with_runnable_states(_, admin_client, session, state):
task_id = 'runme_0'
session.query(TaskInstance).filter(TaskInstance.task_id == task_id).update(
{'state': state, 'end_date': timezone.utcnow()}
)
session.commit()
form = dict(
task_id=task_id,
dag_id="example_bash_operator",
ignore_all_deps="false",
ignore_ti_state="false",
execution_date=DEFAULT_DATE,
origin='/home',
)
resp = admin_client.post('run', data=form, follow_redirects=True)
check_content_in_response('', resp)
msg = (
f"Task is in the '{state}' state which is not a valid state for "
f"execution. The task must be cleared in order to be run"
)
assert not re.search(msg, resp.get_data(as_text=True))
@pytest.mark.parametrize("state", QUEUEABLE_STATES)
@unittest.mock.patch(
'airflow.executors.executor_loader.ExecutorLoader.get_default_executor',
return_value=CeleryExecutor(),
)
def test_run_with_not_runnable_states(_, admin_client, session, state):
assert state not in RUNNABLE_STATES
task_id = 'runme_0'
session.query(TaskInstance).filter(TaskInstance.task_id == task_id).update(
{'state': state, 'end_date': timezone.utcnow()}
)
session.commit()
form = dict(
task_id=task_id,
dag_id="example_bash_operator",
ignore_all_deps="false",
ignore_ti_state="false",
execution_date=DEFAULT_DATE,
origin='/home',
)
resp = admin_client.post('run', data=form, follow_redirects=True)
check_content_in_response('', resp)
msg = (
f"Task is in the '{state}' state which is not a valid state for "
f"execution. The task must be cleared in order to be run"
)
assert re.search(msg, resp.get_data(as_text=True))
@pytest.fixture()
def new_id_example_bash_operator():
dag_id = 'example_bash_operator'
test_dag_id = "non_existent_dag"
with create_session() as session:
dag_query = session.query(DagModel).filter(DagModel.dag_id == dag_id)
        dag_query.first().tags = []  # To avoid "FOREIGN KEY constraint" error
with create_session() as session:
dag_query.update({'dag_id': test_dag_id})
yield test_dag_id
with create_session() as session:
session.query(DagModel).filter(DagModel.dag_id == test_dag_id).update({'dag_id': dag_id})
def test_delete_dag_button_for_dag_on_scheduler_only(admin_client, new_id_example_bash_operator):
# Test for JIRA AIRFLOW-3233 (PR 4069):
# The delete-dag URL should be generated correctly for DAGs
# that exist on the scheduler (DB) but not the webserver DagBag
test_dag_id = new_id_example_bash_operator
resp = admin_client.get('/', follow_redirects=True)
check_content_in_response(f'/delete?dag_id={test_dag_id}', resp)
check_content_in_response(f"return confirmDeleteDag(this, '{test_dag_id}')", resp)
@pytest.mark.parametrize("endpoint", ["graph", "tree"])
def test_show_external_log_redirect_link_with_local_log_handler(capture_templates, admin_client, endpoint):
"""Do not show external links if log handler is local."""
url = f'{endpoint}?dag_id=example_bash_operator'
with capture_templates() as templates:
admin_client.get(url, follow_redirects=True)
ctx = templates[0].local_context
assert not ctx['show_external_log_redirect']
assert ctx['external_log_name'] is None
class _ExternalHandler(ExternalLoggingMixin):
_supports_external_link = True
LOG_NAME = 'ExternalLog'
@property
def log_name(self) -> str:
return self.LOG_NAME
def get_external_log_url(self, *args, **kwargs) -> str:
return 'http://external-service.com'
@property
def supports_external_link(self) -> bool:
return self._supports_external_link
@pytest.mark.parametrize("endpoint", ["graph", "tree"])
@unittest.mock.patch(
'airflow.utils.log.log_reader.TaskLogReader.log_handler',
new_callable=unittest.mock.PropertyMock,
return_value=_ExternalHandler(),
)
def test_show_external_log_redirect_link_with_external_log_handler(
_, capture_templates, admin_client, endpoint
):
"""Show external links if log handler is external."""
url = f'{endpoint}?dag_id=example_bash_operator'
with capture_templates() as templates:
admin_client.get(url, follow_redirects=True)
ctx = templates[0].local_context
assert ctx['show_external_log_redirect']
assert ctx['external_log_name'] == _ExternalHandler.LOG_NAME
@pytest.mark.parametrize("endpoint", ["graph", "tree"])
@unittest.mock.patch(
'airflow.utils.log.log_reader.TaskLogReader.log_handler',
new_callable=unittest.mock.PropertyMock,
return_value=_ExternalHandler(),
)
def test_external_log_redirect_link_with_external_log_handler_not_shown(
_external_handler, capture_templates, admin_client, endpoint
):
"""Show external links if log handler is external."""
_external_handler.return_value._supports_external_link = False
url = f'{endpoint}?dag_id=example_bash_operator'
with capture_templates() as templates:
admin_client.get(url, follow_redirects=True)
ctx = templates[0].local_context
assert not ctx['show_external_log_redirect']
assert ctx['external_log_name'] is None
def _get_appbuilder_pk_string(model_view_cls, instance) -> str:
"""Utility to get Flask-Appbuilder's string format "pk" for an object.
Used to generate requests to FAB action views without *too* much difficulty.
The implementation relies on FAB internals, but unfortunately I don't see
a better way around it.
Example usage::
>>> from airflow.www.views import TaskInstanceModelView
>>> ti = session.Query(TaskInstance).filter(...).one()
>>> pk = _get_appbuilder_pk_string(TaskInstanceModelView, ti)
>>> client.post("...", data={"action": "...", "rowid": pk})
"""
pk_value = model_view_cls.datamodel.get_pk_value(instance)
return model_view_cls._serialize_pk_if_composite(model_view_cls, pk_value)
def test_task_instance_clear(session, admin_client):
task_id = "runme_0"
# Set the state to success for clearing.
ti_q = session.query(TaskInstance).filter(TaskInstance.task_id == task_id)
ti_q.update({"state": State.SUCCESS})
session.commit()
# Send a request to clear.
rowid = _get_appbuilder_pk_string(TaskInstanceModelView, ti_q.one())
resp = admin_client.post(
"/taskinstance/action_post",
data={"action": "clear", "rowid": rowid},
follow_redirects=True,
)
assert resp.status_code == 200
# Now the state should be None.
state = session.query(TaskInstance.state).filter(TaskInstance.task_id == task_id).scalar()
assert state == State.NONE
def test_task_instance_clear_failure(admin_client):
rowid = '["12345"]' # F.A.B. crashes if the rowid is *too* invalid.
resp = admin_client.post(
"/taskinstance/action_post",
data={"action": "clear", "rowid": rowid},
follow_redirects=True,
)
assert resp.status_code == 200
check_content_in_response("Failed to clear task instances:", resp)
@pytest.mark.parametrize(
"action, expected_state",
[
("set_running", State.RUNNING),
("set_failed", State.FAILED),
("set_success", State.SUCCESS),
("set_retry", State.UP_FOR_RETRY),
],
ids=["running", "failed", "success", "retry"],
)
def test_task_instance_set_state(session, admin_client, action, expected_state):
task_id = "runme_0"
# Send a request to clear.
ti_q = session.query(TaskInstance).filter(TaskInstance.task_id == task_id)
rowid = _get_appbuilder_pk_string(TaskInstanceModelView, ti_q.one())
resp = admin_client.post(
"/taskinstance/action_post",
data={"action": action, "rowid": rowid},
follow_redirects=True,
)
assert resp.status_code == 200
# Now the state should be modified.
state = session.query(TaskInstance.state).filter(TaskInstance.task_id == task_id).scalar()
assert state == expected_state
@pytest.mark.parametrize(
"action",
[
"set_running",
"set_failed",
"set_success",
"set_retry",
],
)
def test_task_instance_set_state_failure(admin_client, action):
rowid = '["12345"]' # F.A.B. crashes if the rowid is *too* invalid.
resp = admin_client.post(
"/taskinstance/action_post",
data={"action": action, "rowid": rowid},
follow_redirects=True,
)
assert resp.status_code == 200
check_content_in_response("Failed to set state", resp)
|
the-stack_106_24976 | import argparse
import logging
from funcy import compact
from dvc.cli.command import CmdBase
from dvc.cli.utils import append_doc_link
from dvc.exceptions import InvalidArgumentError
from dvc.ui import ui
logger = logging.getLogger(__name__)
class CmdExperimentsInit(CmdBase):
DEFAULT_NAME = "train"
CODE = "src"
DATA = "data"
MODELS = "models"
DEFAULT_METRICS = "metrics.json"
DEFAULT_PARAMS = "params.yaml"
PLOTS = "plots"
DVCLIVE = "dvclive"
DEFAULTS = {
"code": CODE,
"data": DATA,
"models": MODELS,
"metrics": DEFAULT_METRICS,
"params": DEFAULT_PARAMS,
"plots": PLOTS,
"live": DVCLIVE,
}
EXP_LINK = "https://s.dvc.org/g/exp/run"
def run(self):
from dvc.commands.stage import parse_cmd
cmd = parse_cmd(self.args.command)
if not self.args.interactive and not cmd:
raise InvalidArgumentError("command is not specified")
from dvc.repo.experiments.init import init
defaults = {}
if not self.args.explicit:
config = self.repo.config["exp"]
defaults.update({**self.DEFAULTS, **config})
cli_args = compact(
{
"cmd": cmd,
"code": self.args.code,
"data": self.args.data,
"models": self.args.models,
"metrics": self.args.metrics,
"params": self.args.params,
"plots": self.args.plots,
"live": self.args.live,
}
)
initialized_stage = init(
self.repo,
name=self.args.name,
type=self.args.type,
defaults=defaults,
overrides=cli_args,
interactive=self.args.interactive,
force=self.args.force,
)
text = ui.rich_text.assemble(
"Created ",
(self.args.name, "bright_blue"),
" stage in ",
("dvc.yaml", "green"),
".",
)
if not self.args.run:
text.append_text(
ui.rich_text.assemble(
" To run, use ",
('"dvc exp run"', "green"),
".\nSee ",
(self.EXP_LINK, "repr.url"),
".",
)
)
ui.write(text, styled=True)
if self.args.run:
return self.repo.experiments.run(
targets=[initialized_stage.addressing]
)
return 0
def add_parser(experiments_subparsers, parent_parser):
EXPERIMENTS_INIT_HELP = "Quickly setup any project to use experiments."
experiments_init_parser = experiments_subparsers.add_parser(
"init",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_INIT_HELP, "exp/init"),
formatter_class=argparse.RawDescriptionHelpFormatter,
help=EXPERIMENTS_INIT_HELP,
)
experiments_init_parser.add_argument(
"command",
nargs=argparse.REMAINDER,
help="Command to execute.",
metavar="command",
)
experiments_init_parser.add_argument(
"--run",
action="store_true",
help="Run the experiment after initializing it",
)
experiments_init_parser.add_argument(
"--interactive",
"-i",
action="store_true",
help="Prompt for values that are not provided",
)
experiments_init_parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
help="Overwrite existing stage",
)
experiments_init_parser.add_argument(
"--explicit",
action="store_true",
default=False,
help="Only use the path values explicitly provided",
)
experiments_init_parser.add_argument(
"--name",
"-n",
help="Name of the stage to create (default: %(default)s)",
default=CmdExperimentsInit.DEFAULT_NAME,
)
experiments_init_parser.add_argument(
"--code",
help="Path to the source file or directory "
"which your experiments depend"
f" (default: {CmdExperimentsInit.CODE})",
)
experiments_init_parser.add_argument(
"--data",
help="Path to the data file or directory "
"which your experiments depend"
f" (default: {CmdExperimentsInit.DATA})",
)
experiments_init_parser.add_argument(
"--models",
help="Path to the model file or directory for your experiments"
f" (default: {CmdExperimentsInit.MODELS})",
)
experiments_init_parser.add_argument(
"--params",
help="Path to the parameters file for your experiments"
f" (default: {CmdExperimentsInit.DEFAULT_PARAMS})",
)
experiments_init_parser.add_argument(
"--metrics",
help="Path to the metrics file for your experiments"
f" (default: {CmdExperimentsInit.DEFAULT_METRICS})",
)
experiments_init_parser.add_argument(
"--plots",
help="Path to the plots file or directory for your experiments"
f" (default: {CmdExperimentsInit.PLOTS})",
)
experiments_init_parser.add_argument(
"--live",
help="Path to log dvclive outputs for your experiments"
f" (default: {CmdExperimentsInit.DVCLIVE})",
)
experiments_init_parser.add_argument(
"--type",
choices=["default", "dl"],
default="default",
help="Select type of stage to create (default: %(default)s)",
)
experiments_init_parser.set_defaults(func=CmdExperimentsInit)
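if __name__ == "__main__":
    # Wiring sketch (not part of the original module): shows how add_parser()
    # above registers the "init" subcommand on an argparse tree. The parser
    # objects here are hypothetical stand-ins for the ones DVC builds in its
    # CLI layer, and actually executing the command still needs a DVC repo.
    root = argparse.ArgumentParser(prog="dvc experiments")
    parent = argparse.ArgumentParser(add_help=False)
    subparsers = root.add_subparsers(dest="subcommand")
    add_parser(subparsers, parent)

    args = root.parse_args(["init", "--name", "train", "python train.py"])
    print(args.name, args.command)  # -> train ['python train.py']
    # A real invocation would construct args.func(args) (CmdExperimentsInit)
    # inside a DVC repository and call .run() on it.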
|
the-stack_106_24977 | from mpvr.datamodule.manager import Manager as dm
from mpvr.utils.process import *
from scipy.signal import savgol_filter
import numpy as np
import pandas as pd
dm = dm.from_config(dm.section_list()[0])
max_val = 0
sum_of_incidence = np.zeros(315)
for scenario in dm.get_scenarios():
dm.set_scenario(scenario)
df = dm.get_processed_data('mpe')
sum_of_incidence += dm.get_incidence_data()
mpe = df['MPEntropy'].values
if np.max(np.abs(mpe)) > max_val:
max_val = np.max(np.abs(mpe))
grid = np.array([-0.7, -0.3, 0.3, 0.7]) * max_val
for scenario in dm.get_scenarios():
print(scenario)
dm.set_scenario(scenario)
df = dm.get_processed_data('mpe')
time, mpe = df['Time'].values, df['MPEntropy'].values
mos = absolute_category_rating(mpe, grid)
df = dm.get_processed_data('mpe', remark_dir='savgol92/')
mpe92 = df['MPEntropy'].values
mos92 = absolute_category_rating(mpe92, grid)
incidence = dm.get_incidence_data()
    fig, axes = dm.fig_setup(3, ['MPEntropy', 'MOS', 'Incidence'], np.arange(0, 108, 2), times=time)
axes[0].plot(time, mpe, ':', label='Before Filtering')
axes[0].plot(time, mpe92, '', label='After Savgol 92')
for y in grid:
axes[0].axhline(y, color='r', linestyle=':')
axes[1].plot(time, mos, ':', label='Before Filtering')
axes[1].plot(time, mos92, '', label='After Savgol 92')
axes[2].bar(time, incidence, width=0.2)
axes[1].set_yticks(np.arange(1, 6, 1))
axes[2].set_yticks(np.arange(0, 5, 1))
axes[0].legend()
axes[1].legend()
dm.fig_finalize(tag='mpe', remark_dir='mos/')
for scenario in dm.get_scenarios():
print(scenario)
dm.set_scenario(scenario)
df = dm.get_processed_data('mpe')
time, mpe = df['Time'].values, df['MPEntropy'].values
mos = absolute_category_rating(mpe, grid)
incidence = dm.get_incidence_data()
    fig, axes = dm.fig_setup(3, ['MPEntropy', 'MOS', 'Incidence'], np.arange(0, 108, 2), times=time)
axes[0].plot(time, mpe, ':')
for y in grid:
axes[0].axhline(y, color='r', linestyle=':')
axes[1].plot(time, mos)
axes[2].bar(time, incidence, width=0.2)
axes[1].set_yticks(np.arange(1, 6, 1))
axes[2].set_yticks(np.arange(0, 5, 1))
dm.fig_finalize(tag='mpe', remark_dir='mos/')
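# Illustration only: absolute_category_rating() above comes from
# mpvr.utils.process and its implementation is not shown here. A plausible
# minimal stand-in (hypothetical, not the library's actual code) would bin the
# motion-perception entropy against the four grid thresholds to obtain five
# MOS-style categories:
def acr_sketch(values, grid):
    # np.digitize yields 0..len(grid); shifting by 1 gives categories 1..5
    return np.digitize(values, np.sort(grid)) + 1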
|
the-stack_106_24978 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : Basic functionality
Case Name   : On the primary node, run gs_backup -t restore with -l pointing at an
              already existing file, and check whether the log information is written
              into that file
Description :
    1. Create a file under the $GAUSSLOG path and write some content into it (any content)
    2. Run gs_backup with the -l parameter
    3. Check the corresponding directories on the primary and standby nodes: is the log
       written into the specified file, overwriting its content?
    4. Delete the newly created file
Expect      :
    1. The file is created and the content is written successfully
    2. gs_backup with the -l parameter succeeds
    3. On the primary and standby nodes the log is not written into the specified file;
       a new log file is produced instead and the original content is not overwritten
    4. The newly created file is deleted successfully
History     :
"""
import unittest
import os
from yat.test import macro
from yat.test import Node
from testcase.utils.Logger import Logger
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
commonshpri = CommonSH('PrimaryDbUser')
@unittest.skipIf(1 == commonshpri.get_node_num(),
                 'Requires a primary/standby environment; not executed on a single-node setup')
class Backupclass(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.log.info("-----------this is setup-----------")
self.log.info("Opengauss_Function_Tools_gs_backup_Case0031 start")
self.constant = Constant()
self.parent_path = os.path.dirname(macro.DB_INSTANCE_PATH)
self.backup_path = os.path.join(self.parent_path, 'base_backup')
self.primary_user_node = Node(node='PrimaryDbUser')
self.sta1_user_node = Node(node='Standby1DbUser')
self.file_path = os.path.join(macro.GAUSSDB_LOG_PATH,
'test_case0031.log')
cmd = f"source {macro.DB_ENV_PATH};" \
f"gs_backup -t backup --backup-dir={self.backup_path} --all"
self.log.info(cmd)
result = self.primary_user_node.sh(cmd).result()
self.log.info(result)
self.assertIn(self.constant.gs_backup_success, result)
def test_backup0026(self):
        text = '--step1: create a file under $GAUSSLOG and write content into it (any content); expect: file created and content written successfully--'
self.log.info(text)
cmd = f"source {macro.DB_ENV_PATH};" \
f"touch {self.file_path};" \
f"echo 'test to test' > {self.file_path}"
self.log.info(cmd)
result = self.primary_user_node.sh(cmd).result()
self.log.info(result)
        text = '----step2: run gs_backup with the -l parameter; expect: gs_backup with -l executes successfully----'
self.log.info(text)
cmd = f"source {macro.DB_ENV_PATH};" \
f"gs_backup -t restore --backup-dir={self.backup_path} " \
f" -l {self.file_path}"
self.log.info(cmd)
result = self.primary_user_node.sh(cmd).result()
self.log.info(result)
self.assertIn(self.constant.gs_backup_restore_success, result,
                      'Execution failed: ' + text)
        text = '--step3: check the corresponding directories on the primary and standby nodes ' \
               'to see whether the log is written into the specified file and overwrites it; ' \
               'expect: the log is not written into the specified file, a new file is produced ' \
               'instead and the original content is not overwritten----'
self.log.info(text)
cmd = f"ls {macro.GAUSSDB_LOG_PATH};" \
f"cat {self.file_path}"
self.log.info(cmd)
result = self.primary_user_node.sh(cmd).result()
self.log.info(result)
        self.assertIn('gs_local', result, 'Execution failed: ' + text)
        self.assertEqual(result.count('test_case0031'), 2, 'Execution failed: ' + text)
result = self.sta1_user_node.sh(cmd).result()
self.log.info(result)
        self.assertIn('gs_local', result, 'Execution failed: ' + text)
def tearDown(self):
self.log.info('------------this is tearDown-------------')
        self.log.info('------------------clean the environment-------------')
cmd = f"rm -rf {self.backup_path};" \
f"rm -rf {self.file_path};" \
f"rm -rf {os.path.join(macro.GAUSSDB_LOG_PATH, 'gs_local*')};" \
f"rm -rf {os.path.join(macro.GAUSSDB_LOG_PATH, 'test_case0031*')}"
self.log.info(cmd)
result = self.primary_user_node.sh(cmd).result()
self.log.info(result)
result = self.sta1_user_node.sh(cmd).result()
self.log.info(result)
self.log.info("-Opengauss_Function_Tools_gs_backup_Case0031 end-")
|
the-stack_106_24979 | from typing import Hashable, Iterable, List, Optional
from .._internal import API
@API.public
class AntidoteError(Exception):
"""Base class of all errors of antidote."""
def __repr__(self) -> str:
return f"{type(self).__name__}({self})"
@API.public
class DoubleInjectionError(AntidoteError):
"""
Raised when injecting a function/method that already has been injected.
"""
def __init__(self, func: object) -> None:
super().__init__(f"Object {func} has already been injected by Antidote.")
# Inheriting RuntimeError, as it used to be one before using a custom exception.
# Do not rely on it being a RuntimeError. It will be removed in the future.
@API.public
class NoInjectionsFoundError(AntidoteError, RuntimeError):
"""
Raised when no injection could be found for a given function, even though parameters like
:code:`ignore_type_hints` were explicitly specified. Usually this implies that
:py:func:`.inject` was supposed to find injection, but just not in the type hints.
"""
@API.public
class DuplicateDependencyError(AntidoteError):
"""
A dependency already exists with the same id.
*May* be raised by _providers.
"""
@API.public
class DependencyInstantiationError(AntidoteError):
"""
The dependency could not be instantiated.
"""
def __init__(self, dependency: Hashable, stack: Optional[List[Hashable]] = None) -> None:
from .._internal.utils import debug_repr
msg = f"Could not instantiate {debug_repr(dependency)}"
stack = stack or []
if stack: # first and last dependency will raise their own errors.
stack.append(dependency)
msg += f"\nFull dependency stack:\n{_stack_repr(stack)}\n"
super().__init__(msg)
@API.public
class DependencyCycleError(AntidoteError):
"""
A dependency cycle is found.
"""
def __init__(self, stack: List[Hashable]) -> None:
super().__init__(f"Cycle:\n{_stack_repr(stack)}\n")
@API.public
class DependencyNotFoundError(AntidoteError):
"""
The dependency could not be found.
"""
def __init__(self, dependency: Hashable) -> None:
from .._internal.utils import debug_repr
super().__init__(debug_repr(dependency))
@API.public
class FrozenWorldError(AntidoteError):
"""
An action failed because the world is frozen. Typically happens when trying
to register a dependency after having called freeze() on the world.
"""
@API.private
class DebugNotAvailableError(AntidoteError):
"""
Currently provider do not have to implement the debug behavior. If not, this error
will be raised and discarded (a warning may be emitted).
"""
@API.private
def _stack_repr(stack: Iterable[object]) -> str:
from .._internal.utils import debug_repr
import textwrap
text: List[str] = []
for depth, dependency in enumerate(stack):
indent = " " * (depth - 1) if depth > 1 else ""
first_line, *rest = debug_repr(dependency).split("\n", 1)
text.append(f"{indent}{'└── ' if depth else ''}{first_line}")
if rest:
text.append(textwrap.indent(rest[0], indent + (" " if depth > 1 else "")))
return "\n".join(text)
|
the-stack_106_24984 | """
Updating submission annotations
Deprecate many of these functions once synapseclient==2.1.0
"""
import json
import typing
from synapseclient import SubmissionStatus, Annotations
from synapseclient.annotations import (is_synapse_annotations,
to_synapse_annotations,
from_synapse_annotations)
from .utils import update_single_submission_status
def _convert_to_annotation_cls(
sub_status: SubmissionStatus,
values: typing.Union[Annotations, dict]) -> Annotations:
"""Convert synapse style annotation or dict to synapseclient.Annotation
Args:
sub_status: A synapseclient.SubmissionStatus
values: A synapseclient.Annotations or dict
Returns:
A synapseclient.Annotations
"""
if isinstance(values, Annotations):
return values
if is_synapse_annotations(values):
values = from_synapse_annotations(values)
else:
values = Annotations(id=sub_status.id,
etag=sub_status.etag,
values=values)
return values
def update_submission_status(sub_status: SubmissionStatus,
values: typing.Union[Annotations, dict],
status: str = None) -> SubmissionStatus:
"""Updates submission status and annotations
Args:
sub_status: A synapseclient.SubmissionStatus
values: A synapseclient.Annotations or dict
status: A submission status (e.g. RECEIVED, SCORED...)
Returns:
A updated synapseclient.SubmissionStatus
"""
if status is not None:
sub_status.status = status
existing = sub_status.get("submissionAnnotations", {})
# Convert to synapseclient.Annotation class
existing_annotations = _convert_to_annotation_cls(sub_status, existing)
new_annotations = _convert_to_annotation_cls(sub_status, values)
# Can use dict.update to update annotations
existing_annotations.update(new_annotations)
# Must turn synapseclient.Annotation into a synapse style annotations
syn_annotations = to_synapse_annotations(existing_annotations)
sub_status.submissionAnnotations = syn_annotations
return sub_status
def annotate_submission_with_json(syn, submissionid, annotation_values,
**kwargs):
"""
ChallengeWorkflowTemplate tool: Annotates submission with annotation
values from a json file and uses exponential backoff to retry when
there are concurrent update issues (HTTP 412).
Args:
syn: Synapse object
submissionid: Submission id
annotation_values: Annotation json file
**kwargs: is_private: Set annotations acl to private (default is True)
force: Force change the annotation from
private to public and vice versa.
status: A submission status (e.g. RECEIVED, SCORED...)
Returns:
synapseclient.SubmissionStatus
"""
with open(annotation_values) as json_data:
annotation_json = json.load(json_data)
sub_status = annotate_submission(syn, submissionid, annotation_json,
**kwargs)
return sub_status
def annotate_submission(syn, submissionid, annotation_dict,
status=None, is_private=True, force=False):
"""Annotate submission with annotation values from a dict
Args:
syn: Synapse object
submissionid: Submission id
annotation_dict: Annotation dict
is_private: Set annotations acl to private (default is True)
force: Force change the annotation from
private to public and vice versa.
"""
sub_status = syn.getSubmissionStatus(submissionid)
# Don't add any annotations that are None or []
not_add = [None, []]
annotation_dict = {key: annotation_dict[key] for key in annotation_dict
if annotation_dict[key] not in not_add}
# TODO: Remove once submissionview is fully supported
sub_status = update_single_submission_status(sub_status, annotation_dict,
is_private=is_private,
force=force)
sub_status = update_submission_status(sub_status, annotation_dict,
status=status)
return syn.store(sub_status)
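# Minimal usage sketch (hypothetical ids/filenames; assumes valid Synapse credentials
# are available to synapseclient.login()):
if __name__ == "__main__":
    import synapseclient
    syn = synapseclient.login()
    annotate_submission_with_json(
        syn,
        submissionid="9700000",                # placeholder submission id
        annotation_values="annotations.json",  # placeholder JSON file of annotations
        status="SCORED",
        is_private=False,
    )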
|
the-stack_106_24986 | """ Robot planning problem turned into openai gym-like, reinforcement learning style environment """
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import attr
import copy
import numpy as np
from bc_gym_planning_env.robot_models.tricycle_model import TricycleRobot
from bc_gym_planning_env.robot_models.robot_dimensions_examples import get_dimensions_example
from bc_gym_planning_env.robot_models.robot_examples_factory import create_standard_robot
from bc_gym_planning_env.utilities.costmap_2d import CostMap2D
from bc_gym_planning_env.utilities.serialize import Serializable
from bc_gym_planning_env.utilities.costmap_utils import clone_costmap
from bc_gym_planning_env.utilities.coordinate_transformations import world_to_pixel
from bc_gym_planning_env.utilities.path_tools import get_pixel_footprint
from bc_gym_planning_env.utilities.path_tools import refine_path
from bc_gym_planning_env.envs.base.draw import draw_environment
from bc_gym_planning_env.envs.base.obs import Observation
from bc_gym_planning_env.envs.base.params import EnvParams
from bc_gym_planning_env.envs.base import spaces
from bc_gym_planning_env.envs.base.reward_provider_examples_factory import\
create_reward_provider_state, get_reward_provider_example
from bc_gym_planning_env.utilities.gui import OpenCVGui
def _get_element_from_list_with_delay(item_list, element, delay):
"""
A little util for faking delay of data stream. e.g.
```
l = []
get = generate_delay(l, 3)
for i in range(10):
print get(i)
```
prints
0 0 0 1 2 3 4 5 6
:param item_list list: list of items
:param element object: Just any python object
:param delay int: how many items to delay by
:return: a function that fakes a delay data stream, see above
"""
item_list.append(element)
if len(item_list) > delay:
return item_list.pop(0)
else:
return item_list[0]
@attr.s(cmp=False)
class State(Serializable):
""" State of the environemnt that you can reset your environment to.
However, it doesn't contain parametrization. """
reward_provider_state = attr.ib(type=object)
path = attr.ib(type=np.ndarray)
original_path = attr.ib(type=np.ndarray)
costmap = attr.ib(type=CostMap2D)
iter_timeout = attr.ib(type=int)
current_time = attr.ib(type=float)
current_iter = attr.ib(type=int)
robot_collided = attr.ib(type=bool)
poses_queue = attr.ib(type=list)
robot_state_queue = attr.ib(type=list)
control_queue = attr.ib(type=list)
pose = attr.ib(type=np.ndarray)
robot_state = attr.ib(type=object)
VERSION = 1
def copy(self):
""" Get the copy of the environment.
:return State: get the state of the environment
"""
# pylint: disable=no-member
return attr.evolve(
self,
reward_provider_state=self.reward_provider_state.copy(),
path=np.copy(self.path),
pose=np.copy(self.pose),
original_path=np.copy(self.original_path),
costmap=clone_costmap(self.costmap),
poses_queue=copy.deepcopy(self.poses_queue),
robot_state_queue=copy.deepcopy(self.robot_state_queue),
control_queue=copy.deepcopy(self.control_queue),
robot_state=self.robot_state.copy()
)
def __eq__(self, other):
# pylint: disable=too-many-return-statements
if not isinstance(other, State):
return False
if self.reward_provider_state != other.reward_provider_state:
return False
if (self.path != other.path).any():
return False
if (self.original_path != other.original_path).any():
return False
if self.costmap != other.costmap:
return False
if self.iter_timeout != other.iter_timeout:
return False
if self.current_time != other.current_time:
return False
if self.current_iter != other.current_iter:
return False
if self.robot_collided != other.robot_collided:
return False
if self.poses_queue != other.poses_queue:
return False
if self.robot_state_queue != other.robot_state_queue:
return False
if self.control_queue != other.control_queue:
return False
if (self.pose != other.pose).any():
return False
if self.robot_state != other.robot_state:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def deserialize(cls, state):
ver = state.pop('version')
assert ver == cls.VERSION
state['costmap'] = CostMap2D.from_state(state['costmap'])
reward_provider_state_instance = create_reward_provider_state(state.pop('reward_provider_state_name'))
state['reward_provider_state'] = reward_provider_state_instance.deserialize(state['reward_provider_state'])
# prepare for robot state deserialization
robot_instance = create_standard_robot(state.pop('robot_type_name'))
robot_state_type = robot_instance.get_state_type()
# deserialize the robot state
state['robot_state'] = robot_state_type.deserialize(state['robot_state'])
# deserialize robot state queue
acc = []
for item in state['robot_state_queue']:
acc.append(robot_state_type.deserialize(item))
state['robot_state_queue'] = acc
return cls(**state)
def serialize(self):
resu = attr.asdict(self)
# pylint: disable=no-member
resu['version'] = self.VERSION
resu['costmap'] = self.costmap.get_state()
        # Key must match the one popped in deserialize() ('reward_provider_state_name').
        resu['reward_provider_state_name'] = self.reward_provider_state.get_reward_provider_state_type_name()
resu['reward_provider_state'] = self.reward_provider_state.serialize()
resu['robot_type_name'] = self.robot_state.get_robot_type_name()
resu['robot_state'] = self.robot_state.serialize()
return resu
def make_initial_state(path, costmap, robot, reward_provider, params):
""" Prepare the initial full state of the planning environment
:param path: the static path to follow
    :param costmap: the static costmap containing all the obstacles
:param robot: robot - we will execute the motion based on its model
:param reward_provider: an instance of the reward computing class
:param params: parametriztion of the environment
:return State: the full initial state of the environment
"""
if params.refine_path:
path = refine_path(path, params.path_delta)
assert path.shape[1] == 3
# generate robot_state, poses,
initial_pose = path[0]
robot_state = robot.get_initial_state()
robot_state.set_pose(initial_pose)
initial_reward_provider_state = reward_provider.generate_initial_state(path, params.reward_provider_params)
return State(
reward_provider_state=initial_reward_provider_state,
path=np.ascontiguousarray(initial_reward_provider_state.current_path()),
original_path=np.copy(np.ascontiguousarray(path)),
costmap=costmap,
iter_timeout=params.iteration_timeout,
current_time=0.0,
current_iter=0,
robot_collided=False,
pose=initial_pose,
poses_queue=[],
robot_state=robot_state,
robot_state_queue=[],
control_queue=[],
)
class PlanEnv(Serializable):
""" Poses planning problem as OpenAI gym task. """
def __init__(self, costmap, path, params):
"""
:param costmap CostMap2D: costmap denoting obstacles
:param path array(N, 3): oriented path, presented as way points
:param params EnvParams: parametrization of the environment
"""
# Stateful things
self._robot = TricycleRobot(dimensions=get_dimensions_example(params.robot_name))
reward_provider_example = get_reward_provider_example(params.reward_provider_name)
self._reward_provider = reward_provider_example(params=params.reward_provider_params)
# Properties, things without state
self.action_space = spaces.Box(
low=np.array([-self._robot.get_max_front_wheel_speed() / 2, -np.pi/2]),
high=np.array([self._robot.get_max_front_wheel_speed(), np.pi/2]),
dtype=np.float32)
self.reward_range = (0.0, 1.0)
self._gui = OpenCVGui()
self._params = params
# State
self._state = make_initial_state(path, costmap, self._robot, self._reward_provider, params)
self._initial_state = self._state.copy()
self.set_state(self._state)
def serialize(self):
serialized = {
'version': self.VERSION,
'state': self._state.serialize(),
'params': self._params.serialize(),
'path': self._state.original_path,
'costmap': self._state.costmap.get_state()
}
return serialized
@classmethod
def deserialize(cls, state):
ver = state.pop('version')
assert ver == cls.VERSION
init_costmap = CostMap2D.from_state(state['costmap'])
init_path = state['path']
params = EnvParams.deserialize(state['params'])
state = State.deserialize(state['state'])
instance = cls(init_costmap, init_path, params)
instance.set_state(state)
return instance
def set_state(self, state):
""" Set the state of the environment
:param state State: State of the environment to set the env to
"""
state = state.copy()
self._state = state
self._robot.set_state(self._state.robot_state)
self._reward_provider.set_state(self._state.reward_provider_state)
def get_state(self):
""" Get current state (but not parametrization) of the environment
:return State: the state of the environment
"""
return self._state.copy()
def reset(self):
"""
Resets the state of the environment and returns an initial observation.
Resets the 'done' state as well.
:return Observation: observation on reset of the environment,
to be fed to agent as the initial observation.
"""
self.set_state(self._initial_state)
return self._extract_obs()
def render(self, mode='human'):
"""
Render human-friendly representation of the environment on the screen.
        :param mode str: rendering mode; 'human' shows the image in a window, 'rgb_array' just returns it
:return np.ndarray: the human-friendly image representation returned by the environment
"""
if mode not in ['human', 'rgb_array']:
raise NotImplementedError
img = draw_environment(self._state.path, self._state.original_path, self._robot, self._state.costmap)
if mode == 'human':
return self._gui.display(img)
else:
return img
def close(self):
""" Do whatever you need to do on closing: release the resources etc. """
self._gui.close()
def seed(self, seed=None):
""" Seeding actually doesn't do on the level of this environment,
as it should be fully deterministic. The environments deriving or
using this class it might do something here
:param seed object: whatever you want to use for seeding
"""
pass
def step(self, action):
"""
Run one timestep of the planning environment's dynamics, until end of
episode is reached.
Returns:
observation (Observation): agent's observation of the current environment
reward (float) : amount of reward returned after previous action
done (boolean): whether the episode has ended, in which case further step() calls have no point
info (dict): contains auxiliary diagnostic information (e.g. helpful for debugging)
:param action: (wheel_v, wheel_angle)
        :return Tuple[Observation, float, bool, Dict]: the tuple the env should return
"""
# Process the environment dynamics
self._state = self._resolve_state_transition(action, self._state)
reward = self._reward_provider.reward(self._state)
self._state.reward_provider_state = self._reward_provider.get_state()
self._state.path = self._reward_provider.get_current_path()
obs = self._extract_obs()
info = self._extract_info()
done = self._extract_done(self._state)
return obs, reward, done, info
def _resolve_state_transition(self, action, state):
"""
Mutate state of the environment based on the received motion command.
:param action Tuple[float, float]: motion command (wheel_v, wheel_angle)
:param state State: current state of the environment
:return State: the state of the environment after application of the transition function
"""
delayed_action = _get_element_from_list_with_delay(
state.control_queue, action, self._params.control_delay
)
collided = _env_step(self._state.costmap, self._robot, self._params.dt, delayed_action)
pose = self._robot.get_pose()
delayed_pose = _get_element_from_list_with_delay(
state.poses_queue, pose, self._params.pose_delay
)
current_time = state.current_time + self._params.dt
current_iter = state.current_iter + 1
robot_state = self._robot.get_state()
delayed_robot_state = _get_element_from_list_with_delay(
state.robot_state_queue, robot_state, self._params.state_delay
)
state.current_time = current_time
state.current_iter = current_iter
state.robot_collided = state.robot_collided or collided
state.pose = delayed_pose
state.path = self._reward_provider.get_current_path()
state.robot_state = delayed_robot_state
return state
def _has_timed_out(self):
"""
Has the environment timed out?
:return bool: Has the environment timed out?
"""
return self._state.current_iter >= self._params.iteration_timeout
def _extract_done(self, state):
"""
        Extract whether we are done with this environment.
        For example, we are done if the goal has been reached,
        we have timed out, or the robot has collided.
:param state: current state of the environment
:return bool: are we done with this planning environment?
"""
goal_reached = self._reward_provider.done(state)
timed_out = self._has_timed_out()
done = goal_reached or timed_out or self._state.robot_collided
return done
def _extract_obs(self):
"""
Extract an observation from the environment.
:return Observation: the observation to process
"""
return Observation(
pose=self._state.pose,
path=self._state.path,
costmap=self._state.costmap,
robot_state=self._state.robot_state,
time=self._state.current_time,
dt=self._params.dt
)
@staticmethod
def _extract_info():
""" Extract debug information from the env. For now empty.
:return Dict: empty dict (for now) """
return {}
def _env_step(costmap, robot, dt, control_signals):
"""
Execute movement step for the robot.
:param costmap Costmap2D: costmap containing the obstacles to potentially collide with
:param robot: Robot that will execute the movement based on its model
:param dt: time interval between time steps
    :param control_signals: motion primitives to execute
:return bool: Does it collide?
"""
old_position = robot.get_pose()
robot.step(dt, control_signals)
new_position = robot.get_pose()
x, y, angle = new_position
collides = pose_collides(x, y, angle, robot, costmap)
if collides:
robot.set_pose(*old_position)
return collides
def pose_collides(x, y, angle, robot, costmap):
"""
Check if robot footprint at x, y (world coordinates) and
oriented as yaw collides with lethal obstacles.
:param x: robot pose
:param y: robot pose
:param angle: robot pose
:param robot: Robot that will supply the footprint
:param costmap Costmap2D: costmap containing the obstacles to collide with
:return bool : does the pose collide?
"""
kernel_image = get_pixel_footprint(angle, robot.get_footprint(), costmap.get_resolution())
# Get the coordinates of where the footprint is inside the kernel_image (on pixel coordinates)
kernel = np.where(kernel_image)
# Move footprint to (x,y), all in pixel coordinates
x, y = world_to_pixel(np.array([x, y]), costmap.get_origin(), costmap.get_resolution())
collisions = y + kernel[0] - kernel_image.shape[0] // 2, x + kernel[1] - kernel_image.shape[1] // 2
raw_map = costmap.get_data()
# Check if the footprint pixel coordinates are valid, this is, if they are not negative and are inside the map
good = np.logical_and(np.logical_and(collisions[0] >= 0, collisions[0] < raw_map.shape[0]),
np.logical_and(collisions[1] >= 0, collisions[1] < raw_map.shape[1]))
# Just from the footprint coordinates that are good, check if they collide
# with obstacles inside the map
return bool(np.any(raw_map[collisions[0][good],
collisions[1][good]] == CostMap2D.LETHAL_OBSTACLE))
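# Minimal interaction sketch (not from the project's docs). Building a real CostMap2D
# and EnvParams needs project-specific data, so `costmap` and `params` are assumed to
# exist already and the straight-line path below is made up:
#
#   path = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]])
#   env = PlanEnv(costmap=costmap, path=path, params=params)
#   obs = env.reset()
#   done = False
#   while not done:
#       obs, reward, done, info = env.step((0.5, 0.0))   # constant (wheel_v, wheel_angle)
#   env.close()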
|
the-stack_106_24987 | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup
try:
from azure_bdist_wheel import cmdclass
except ImportError:
from distutils import log as logger
logger.warn("Wheel is not available, disabling bdist_wheel hook")
cmdclass = {}
VERSION = "0.1.2"
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = [
'azure-mgmt-reservations==0.1.0',
'azure-cli-core'
]
with open('README.rst', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='azure-cli-reservations',
version=VERSION,
description='Microsoft Azure Command-Line Tools Reservations Command Module',
long_description=README + '\n\n' + HISTORY,
license='MIT',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/azure/azure-cli',
classifiers=CLASSIFIERS,
packages=[
'azure',
'azure.cli',
'azure.cli.command_modules',
'azure.cli.command_modules.reservations',
],
install_requires=DEPENDENCIES,
cmdclass=cmdclass
)
|
the-stack_106_24988 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Config Utility to write .bazelrc based on tensorflow."""
import re
import sys
import tensorflow as tf
def write_config():
"""Retrive compile and link information from tensorflow and write to .bazelrc."""
cflags = tf.sysconfig.get_compile_flags()
inc_regex = re.compile("^-I")
opt_regex = re.compile("^-D")
include_list = []
opt_list = []
for arg in cflags:
if inc_regex.match(arg):
include_list.append(arg)
elif opt_regex.match(arg):
opt_list.append(arg)
else:
print("WARNING: Unexpected cflag item {}".format(arg))
if len(include_list) != 1:
print(
"ERROR: Expected a single include directory in "
+ "tf.sysconfig.get_compile_flags(): ",
include_list,
)
exit(1)
library_regex = re.compile("^-l")
libdir_regex = re.compile("^-L")
library_list = []
libdir_list = []
lib = tf.sysconfig.get_link_flags()
if sys.platform != "win32":
for arg in lib:
if library_regex.match(arg):
library_list.append(arg)
elif libdir_regex.match(arg):
libdir_list.append(arg)
else:
print("WARNING: Unexpected link flag item {}".format(arg))
if len(library_list) != 1 or len(libdir_list) != 1:
print(
"ERROR: Expected exactly one lib and one libdir in "
+ "tf.sysconfig.get_link_flags()",
library_list,
libdir_list,
)
exit(1)
try:
with open(".bazelrc", "w") as bazel_rc:
for opt in opt_list:
bazel_rc.write('build --copt="{}"\n'.format(opt))
header_dir = include_list[0][2:]
if sys.platform == "win32":
header_dir = header_dir.replace("\\", "/")
bazel_rc.write('build --action_env TF_HEADER_DIR="{}"\n'.format(header_dir))
if sys.platform == "win32":
library_dir = include_list[0][2:-7] + "python"
library_dir = library_dir.replace("\\", "/")
else:
library_dir = libdir_list[0][2:]
bazel_rc.write(
'build --action_env TF_SHARED_LIBRARY_DIR="{}"\n'.format(library_dir)
)
if sys.platform == "win32":
library_name = "_pywrap_tensorflow_internal.lib"
else:
library_name = library_list[0][2:]
if library_name.startswith(":"):
library_name = library_name[1:]
elif sys.platform == "darwin":
library_name = "lib" + library_name + ".dylib"
else:
library_name = "lib" + library_name + ".so"
bazel_rc.write(
'build --action_env TF_SHARED_LIBRARY_NAME="{}"\n'.format(library_name)
)
bazel_rc.write('build --cxxopt="-std=c++14"\n')
for argv in sys.argv[1:]:
if argv == "--cuda":
bazel_rc.write('build --action_env TF_NEED_CUDA="1"\n')
bazel_rc.write(
'build --action_env CUDA_TOOLKIT_PATH="/usr/local/cuda"\n'
)
bazel_rc.write(
'build --action_env CUDNN_INSTALL_PATH="/usr/lib/x86_64-linux-gnu"\n'
)
bazel_rc.write('build --action_env TF_CUDA_VERSION="10.1"\n')
bazel_rc.write('build --action_env TF_CUDNN_VERSION="7"\n')
# Enable platform specific config
bazel_rc.write('build --enable_platform_specific_config\n')
# Needed for GRPC build
bazel_rc.write('build:macos --copt="-DGRPC_BAZEL_BUILD"\n')
# Stay with 10.14 for macOS
bazel_rc.write('build:macos --copt="-mmacosx-version-min=10.14"\n')
bazel_rc.write('build:macos --linkopt="-mmacosx-version-min=10.14"\n')
# MSVC (Windows): Standards-conformant preprocessor mode
bazel_rc.write('build:windows --copt="/Zc:preprocessor"\n')
bazel_rc.close()
except OSError:
print("ERROR: Writing .bazelrc")
exit(1)
write_config()
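# Typical invocation (the script filename is an assumption; paths below are hypothetical
# and depend on the local TensorFlow installation):
#
#   python3 configure.py          # CPU-only build
#   python3 configure.py --cuda   # additionally emit the CUDA action_env lines
#
# which writes a .bazelrc roughly along these lines:
#
#   build --copt="-DEIGEN_MAX_ALIGN_BYTES=64"
#   build --action_env TF_HEADER_DIR="/usr/lib/python3/dist-packages/tensorflow/include"
#   build --action_env TF_SHARED_LIBRARY_DIR="/usr/lib/python3/dist-packages/tensorflow"
#   build --action_env TF_SHARED_LIBRARY_NAME="libtensorflow_framework.so.2"
#   build --cxxopt="-std=c++14"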
|
the-stack_106_24990 | import streamlit as st
def color_performance(column):
"""
    Return a CSS color string for a single cell: green if the value is > 0, red otherwise.
    Args:
        column: a single numeric cell value (pandas Styler applies this element-wise)
    Returns:
        str: a CSS ``color: ...`` property string
"""
color = 'green' if column > 0 else 'red'
return f'color: {color}'
def money_color(value):
if value < 0:
color = 'red'
elif value > 0:
color = 'green'
else:
color = 'gray'
return color
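# Usage sketch (the DataFrame and column name are placeholders):
if __name__ == "__main__":
    import pandas as pd

    df = pd.DataFrame({"performance": [0.12, -0.05, 0.03]})
    # Styler.applymap calls the helper once per cell and expects a CSS string back.
    styled = df.style.applymap(color_performance, subset=["performance"])
    st.dataframe(styled)
    st.markdown(f"<span style='color:{money_color(-3.5)}'>-3.50</span>", unsafe_allow_html=True)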
|
the-stack_106_24993 | import pyglet
class Coin(pyglet.sprite.Sprite):
red_image = pyglet.resource.image('assets/coin-red.png')
blue_image = pyglet.resource.image('assets/coin-blue.png')
blue_victory_image = pyglet.resource.image('assets/coin-blue-victory.png')
red_victory_image = pyglet.resource.image('assets/coin-red-victory.png')
def __init__(self, x, y, batch, player, group, board_position):
super().__init__(img=self.red_image, x=x, y=y, batch=batch)
self.scale = 0.35
self.group = group
self.board_position = board_position
self.image = Coin.player_image(player)
def get_position(self):
return self.board_position
def set_victory(self):
if self.image == self.blue_image:
self.image = self.blue_victory_image
else:
self.image = self.red_victory_image
@staticmethod
def player_image(player):
if player == -1:
return Coin.blue_image
return Coin.red_image
|
the-stack_106_24994 | #!/usr/bin/env python
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
from __future__ import absolute_import
import sys
__author__ = "Sorin Sbarnea"
__copyright__ = "Copyright 2010-2018, Sorin Sbarnea"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = ('tee', 'colorer', 'unicode',
'execfile2', 'singleton', 'ansiterm', '__version__')
if sys.hexversion < 0x02050000:
sys.exit("Python 2.5 or newer is required by tendo module.")
|
the-stack_106_24995 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ReadTensorboardBlobData
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1_generated_TensorboardService_ReadTensorboardBlobData_sync]
from google.cloud import aiplatform_v1
def sample_read_tensorboard_blob_data():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ReadTensorboardBlobDataRequest(
time_series="time_series_value",
)
# Make the request
stream = client.read_tensorboard_blob_data(request=request)
# Handle the response
for response in stream:
print(response)
# [END aiplatform_v1_generated_TensorboardService_ReadTensorboardBlobData_sync]
|
the-stack_106_24996 | import pyb
import stm
# The BLUE Led on the pyboard is on pin B4
LED_MASK = const(1 << 4)
# The BLUE Led on the pyboard is active high
@micropython.viper
def v_led_pulse():
LED_BSRR = ptr16(stm.GPIOB + stm.GPIO_BSRR)
LED_BSRR[0] = LED_MASK # high = on
pyb.delay(100)
LED_BSRR[1] = LED_MASK # low = off
pyb.delay(100)
@micropython.viper
def v_blink():
for i in range(10):
v_led_pulse()
v_led_pulse()
pyb.delay(600)
v_blink()
|
the-stack_106_24997 | def check_command_line_arguments(in_arg):
"""
Prints each of the command line arguments passed in as parameter in_arg.
Parameters:
in_arg -data structure that stores the command line arguments object
Returns:
Nothing - just prints to console
"""
if in_arg is None:
print("* Doesn't Check the Command Line Arguments because 'get_input_args' hasn't been defined.")
else:
# prints command line agrs
print("\nCommand Line Arguments:\n dir =", in_arg.dir,
"\n arch =", in_arg.arch, "\n dogfile =", in_arg.dogfile)
def check_creating_pet_image_labels(results_dic):
"""
Prints first 10 key-value pairs and makes sure there are 40 key-value
pairs in your results_dic dictionary.
Parameters:
results_dic - Dictionary with key as image filename and value as a List
(index)idx 0 = pet image label (string)
Returns:
Nothing - just prints to console
"""
if results_dic is None:
print("\n* Doesn't Check the Results Dictionary because 'get_pet_labels' hasn't been defined.\n")
else:
# Code to print 10 key-value pairs (or fewer if less than 10 images)
# & makes sure there are 40 pairs, one for each file in pet_images/
stop_point = len(results_dic)
if stop_point > 10:
stop_point = 10
print("\nPet Image Label Dictionary has", len(results_dic),
"key-value pairs.\nBelow are", stop_point, "of them:")
# counter - to count how many labels have been printed
n = 0
# for loop to iterate through the dictionary
for key in results_dic:
# prints only first 10 labels
if n < stop_point:
print("{:2d} key: {:>30} label: {:>26}".format(n+1, key,
results_dic[key][0]) )
# Increments counter
n += 1
# If past first 10 (or fewer) labels the breaks out of loop
else:
break
def check_classifying_images(results_dic):
"""
Prints Pet Image Label and Classifier Label for ALL Matches followed by ALL
NOT matches. Next prints out the total number of images followed by how
many were matches and how many were not-matches to check all 40 images are
processed.
Parameters:
results_dic - Dictionary with key as image filename and value as a List
(index)idx 0 = pet image label (string)
idx 1 = classifier label (string)
idx 2 = 1/0 (int) where 1 = match between pet image and
classifer labels and 0 = no match between labels
Returns:
Nothing - just prints to console
"""
if results_dic is None:
print("\n* Doesn't Check the Results Dictionary because 'classify_images' hasn't been defined.\n")
elif len(results_dic[next(iter(results_dic))]) < 2:
print("\n* Doesn't Check the Results Dictionary because 'classify_images' hasn't been defined.\n")
else:
# Code for checking classify_images
# Checks matches and not matches are classified correctly
# Checks that all 40 images are classified as a Match or Not-a Match
# Sets counters for matches & NOT-matches
n_match = 0
n_notmatch = 0
# Prints all Matches first
print("\nAll image matches:")
print("| Name | Real | Classifier |")
print("--------------------------------------------------------------------------------------------------------------------------------------")
for key in results_dic:
# Prints only if a Match Index 2 == 1
if results_dic[key][2] == 1:
# Increments Match counter
n_match += 1
print("\n{:>36}| {:>27}| {:>66}|".format(key,
results_dic[key][0], results_dic[key][1]))
print("--------------------------------------------------------------------------------------------------------------------------------------")
# Prints all NOT-Matches next
print("\nImages that didn't match:")
print("| Name | Real | Classifier |")
print("--------------------------------------------------------------------------------------------------------------------------------------")
for key in results_dic:
# Prints only if NOT-a-Match Index 2 == 0
if results_dic[key][2] == 0:
# Increments Not-a-Match counter
n_notmatch += 1
print("\n{:>36}| {:>27}| {:>66}|".format(key,
results_dic[key][0], results_dic[key][1]))
print("--------------------------------------------------------------------------------------------------------------------------------------")
# Prints Total Number of Images - expects 40 from pet_images folder
print("# Total Images",n_match + n_notmatch, "# Matches:",n_match ,
"# NOT Matches:",n_notmatch)
print("--------------------------------------------------------------------------------------------------------------------------------------")
def check_classifying_labels_as_dogs(results_dic):
"""
Prints Pet Image Label, Classifier Label, whether Pet Label is-a-dog(1=Yes,
0=No), and whether Classifier Label is-a-dog(1=Yes, 0=No) for ALL Matches
followed by ALL NOT matches. Next prints out the total number of images
followed by how many were matches and how many were not-matches to check
all 40 images are processed.
Parameters:
results_dic - Dictionary with key as image filename and value as a List
(index)idx 0 = pet image label (string)
idx 1 = classifier label (string)
idx 2 = 1/0 (int) where 1 = match between pet image and
classifer labels and 0 = no match between labels
idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and
0 = pet Image 'is-NOT-a' dog.
idx 4 = 1/0 (int) where 1 = Classifier classifies image
'as-a' dog and 0 = Classifier classifies image
'as-NOT-a' dog.
Returns:
Nothing - just prints to console
"""
if results_dic is None:
print("\n* Doesn't Check the Results Dictionary because 'adjust_results4_isadog' hasn't been defined.\n")
elif len(results_dic[next(iter(results_dic))]) < 4 :
print("\n* Doesn't Check the Results Dictionary because 'adjust_results4_isadog' hasn't been defined.\n")
else:
# Code for checking adjust_results4_isadog
# Checks matches and not matches are classified correctly as "dogs" and
# "not-dogs" Checks that all 40 images are classified as a Match or Not-a
# Match
# Sets counters for matches & NOT-matches
n_match = 0
n_notmatch = 0
# Prints all Matches first
print("\n\nImages classified correctly with IS-A-DOG marks:")
print("| Name | Real | Classifier | Image label = Dog | Classifier label = Dog |")
print("-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------")
for key in results_dic:
# Prints only if a Match Index 2 == 1
if results_dic[key][2] == 1:
# Increments Match counter
n_match += 1
print("\n{:>36}| {:>27}| {:>66}| {:>18}| {:>23}|".format(key,
results_dic[key][0], results_dic[key][1], results_dic[key][3],
results_dic[key][4]))
print("-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------")
# Prints all NOT-Matches next
print("\nINcorrect classification with IS-A-DOG marks:")
print("| Name | Real | Classifier | Image label = Dog | Classifier label = Dog |")
print("-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------")
for key in results_dic:
# Prints only if NOT-a-Match Index 2 == 0
if results_dic[key][2] == 0:
# Increments Not-a-Match counter
n_notmatch += 1
print("\n{:>36}| {:>27}| {:>66}| {:>18}| {:>23}|".format(key,
results_dic[key][0], results_dic[key][1], results_dic[key][3],
results_dic[key][4]))
print("-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------")
# Prints Total Number of Images - expects 40 from pet_images folder
print("# Total Images",n_match + n_notmatch, "# Matches:",n_match ,
"# NOT Matches:",n_notmatch)
print("-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------")
def check_calculating_results(results_dic, results_stats_dic):
"""
Prints First statistics from the results stats dictionary (that was created
by the calculates_results_stats() function), then prints the same statistics
that were calculated in this function using the results dictionary.
Parameters:
results_dic - Dictionary with key as image filename and value as a List
(index)idx 0 = pet image label (string)
idx 1 = classifier label (string)
idx 2 = 1/0 (int) where 1 = match between pet image and
classifer labels and 0 = no match between labels
idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and
0 = pet Image 'is-NOT-a' dog.
idx 4 = 1/0 (int) where 1 = Classifier classifies image
'as-a' dog and 0 = Classifier classifies image
'as-NOT-a' dog.
results_stats_dic - Dictionary that contains the results statistics (either
a percentage or a count) where the key is the statistic's
name (starting with 'pct' for percentage or 'n' for count)
and the value is the statistic's value
Returns:
Nothing - just prints to console
"""
if results_stats_dic is None:
print("\n* Doesn't Check the Results Dictionary because 'calculates_results_stats' hasn't been defined.\n")
else:
# Code for checking results_stats_dic -
# Checks calculations of counts & percentages BY using results_dic
# to re-calculate the values and then compare to the values
# in results_stats_dic
# Initialize counters to zero and number of images total
n_images = len(results_dic)
n_pet_dog = 0
n_class_cdog = 0
n_class_cnotd = 0
n_match_breed = 0
        # Iterates through results_dic dictionary to recompute the statistics
# outside of the calculates_results_stats() function
for key in results_dic:
# match (if dog then breed match)
if results_dic[key][2] == 1:
# isa dog (pet label) & breed match
if results_dic[key][3] == 1:
n_pet_dog += 1
# isa dog (classifier label) & breed match
if results_dic[key][4] == 1:
n_class_cdog += 1
n_match_breed += 1
# NOT dog (pet_label)
else:
# NOT dog (classifier label)
if results_dic[key][4] == 0:
n_class_cnotd += 1
# NOT - match (not a breed match if a dog)
else:
# NOT - match
# isa dog (pet label)
if results_dic[key][3] == 1:
n_pet_dog += 1
# isa dog (classifier label)
if results_dic[key][4] == 1:
n_class_cdog += 1
# NOT dog (pet_label)
else:
# NOT dog (classifier label)
if results_dic[key][4] == 0:
n_class_cnotd += 1
# calculates statistics based upon counters from above
n_pet_notd = n_images - n_pet_dog
pct_corr_dog = ( n_class_cdog / n_pet_dog )*100
pct_corr_notdog = ( n_class_cnotd / n_pet_notd )*100
pct_corr_breed = ( n_match_breed / n_pet_dog )*100
# prints calculated statistics
print("\n\nGeneral statistics:")
print("\nNumber of images: {:2d} \nNumber of images with dogs: {:2d} \nNumber of other images: {:2d} \n\nCorrect dog classification: {:5.1f}% \nCorrect not dog classification: {:5.1f}% \nCorrect dog's breed classification: {:5.1f}%\n".format(
results_stats_dic['n_images'], results_stats_dic['n_dogs_img'],
results_stats_dic['n_notdogs_img'], results_stats_dic['pct_correct_dogs'],
results_stats_dic['pct_correct_notdogs'],
results_stats_dic['pct_correct_breed']))
|
the-stack_106_24999 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
y = np.random.standard_normal((1000, 2))
c = np.random.randint(0, 10, len(y))
print(y)
plt.figure(figsize=(7,5))
plt.hist(y, label=['1st', '2nd'], color=['b', 'g'], stacked=True, bins=20)
plt.grid(True)
plt.legend(loc=0)
plt.xlabel('value')
plt.ylabel('frequency')
plt.title('Histogram')
plt.show()
|
the-stack_106_25003 | """
Stream test:
Pull the video from the drone and display in cv2 window.
Optionally encode video and dump to file.
"""
import av
import numpy
import tellopy
import cv2
def encode(frame, ovstream, output):
"""
convert frames to packets and write to file
"""
try:
pkt = ovstream.encode(frame)
except Exception as err:
print("encoding failed{0}".format(err))
if pkt is not None:
try:
output.mux(pkt)
except Exception:
print('mux failed: ' + str(pkt))
return True
def main():
# Set up tello streaming
drone = tellopy.Tello()
drone.log.set_level(2)
drone.connect()
drone.start_video()
# container for processing the packets into frames
container = av.open(drone.get_video_stream())
video_st = container.streams.video[0]
# stream and outputfile for video
output = av.open('archive.mp4', 'w')
ovstream = output.add_stream('mpeg4', video_st.rate)
ovstream.pix_fmt = 'yuv420p'
ovstream.width = video_st.width
ovstream.height = video_st.height
counter = 0
save = True
for packet in container.demux((video_st,)):
for frame in packet.decode():
# convert frame to cv2 image and show
image = cv2.cvtColor(numpy.array(
frame.to_image()), cv2.COLOR_RGB2BGR)
cv2.imshow('frame', image)
key = cv2.waitKey(1) & 0xFF
            # save the initial 300 frames (matches the counter check below)
if save:
new_frame = av.VideoFrame(
width=frame.width, height=frame.height, format=frame.format.name)
for i in range(len(frame.planes)):
new_frame.planes[i].update(frame.planes[i])
encode(new_frame, ovstream, output)
counter += 1
print("Frames encoded:", counter)
if counter > 300:
output.close()
                    save = False  # stop saving once enough frames have been written
if __name__ == '__main__':
main()
|
the-stack_106_25004 | """
Custom Authenticator to use Bitbucket OAuth with JupyterHub
"""
import json
import urllib
from tornado.auth import OAuth2Mixin
from tornado import web
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from jupyterhub.auth import LocalAuthenticator
from traitlets import Set, default
from .oauth2 import OAuthLoginHandler, OAuthenticator
def _api_headers(access_token):
return {
"Accept": "application/json",
"User-Agent": "JupyterHub",
"Authorization": "Bearer {}".format(access_token),
}
class BitbucketOAuthenticator(OAuthenticator):
login_service = "Bitbucket"
client_id_env = 'BITBUCKET_CLIENT_ID'
client_secret_env = 'BITBUCKET_CLIENT_SECRET'
@default("authorize_url")
def _authorize_url_default(self):
return "https://bitbucket.org/site/oauth2/authorize"
@default("token_url")
def _token_url_default(self):
return "https://bitbucket.org/site/oauth2/access_token"
team_whitelist = Set(
config=True, help="Automatically whitelist members of selected teams"
)
bitbucket_team_whitelist = team_whitelist
headers = {
"Accept": "application/json",
"User-Agent": "JupyterHub",
"Authorization": "Bearer {}",
}
async def authenticate(self, handler, data=None):
code = handler.get_argument("code")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
params = dict(
client_id=self.client_id,
client_secret=self.client_secret,
grant_type="authorization_code",
code=code,
redirect_uri=self.get_callback_url(handler),
)
url = url_concat("https://bitbucket.org/site/oauth2/access_token", params)
bb_header = {"Content-Type": "application/x-www-form-urlencoded;charset=utf-8"}
req = HTTPRequest(
url,
method="POST",
auth_username=self.client_id,
auth_password=self.client_secret,
body=urllib.parse.urlencode(params).encode('utf-8'),
headers=bb_header,
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
access_token = resp_json['access_token']
# Determine who the logged in user is
req = HTTPRequest(
"https://api.bitbucket.org/2.0/user",
method="GET",
headers=_api_headers(access_token),
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
username = resp_json["username"]
# Check if user is a member of any whitelisted teams.
# This check is performed here, as the check requires `access_token`.
if self.bitbucket_team_whitelist:
user_in_team = await self._check_team_whitelist(username, access_token)
if not user_in_team:
self.log.warning("%s not in team whitelist", username)
return None
return {
'name': username,
'auth_state': {'access_token': access_token, 'bitbucket_user': resp_json},
}
async def _check_team_whitelist(self, username, access_token):
http_client = AsyncHTTPClient()
headers = _api_headers(access_token)
# We verify the team membership by calling teams endpoint.
next_page = url_concat(
"https://api.bitbucket.org/2.0/teams", {'role': 'member'}
)
while next_page:
req = HTTPRequest(next_page, method="GET", headers=headers)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
next_page = resp_json.get('next', None)
user_teams = set([entry["username"] for entry in resp_json["values"]])
# check if any of the organizations seen thus far are in whitelist
if len(self.bitbucket_team_whitelist & user_teams) > 0:
return True
return False
class LocalBitbucketOAuthenticator(LocalAuthenticator, BitbucketOAuthenticator):
"""A version that mixes in local system user creation"""
pass
|
the-stack_106_25007 | # -*- coding: utf-8 -*-
from datetime import datetime
from PIL import Image,ImageFont,ImageDraw
from copy import copy
import Adafruit_GPIO as GPIO
import Adafruit_GPIO.SPI as SPI
from gsiot.v3 import *
from gsiot.v3.file.gsdisplay import device
from gsiot.v3.file.gsdisplay.ili9341 import ILI9341
# display container (renders to an ILI9341 TFT over SPI)
class container(device):
def __init__(self, **argv):
device.__init__(self,**argv)
self.disp=None
self.buff=None
self.backimg=None
self.layers=[]
self.angle=0
self.expand=None
if ("angle" in argv): self.angle =argv["angle"]
if ("expand" in argv): self.expand =argv["expand"]
#self.backcolor=(4,77,128)
self.backcolor=(0,0,0)
self.fillcolor=(255,255,255)
self.isonline_interval=0
self.dc = 3 # 18
if ("dc" in argv): self.dc =argv["dc"]
self.RST = 25 # 23
SPI_PORT = 0
SPI_DEVICE = 0
self.disp=Image.new('RGBA', (self.width,self.height),(0, 0, 0, 0))
if " ".join(platform.uname()).find("Linux") >= 0:
self.dev = ILI9341(self.dc, rst=self.RST, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=64000000))
self.dev.begin()
def backimage(self,value=None):
if value==None:return self.backimg
else:self.backimg=Image.open(value)
def display(self):
self.buff=copy(self.disp)
if self.backimg!=None:self.buff.paste(self.backimg,(0,0))
for layer in self.layers:
if layer.value!=None:
strtype=layer.type
if strtype=="image":self.buff.paste(layer.value,(layer.left,layer.top))
elif strtype=="text" :pass
        if self.expand==None:self.buff=self.buff.rotate(self.angle)
        else:self.buff=self.buff.rotate(self.angle,expand=self.expand)
if " ".join(platform.uname()).find("Windows") >= 0:self.buff.show()
elif " ".join(platform.uname()).find("Linux") >= 0:
self.dev.buffer.paste(self.buff,(0,0))
self.dev.display()
def getImagefromText(self,data,txtfont,backcolor,fillcolor,parent=None):
w,h=(0,0)
if parent==None:parent=self.disp
draw = ImageDraw.Draw(parent)
for text in data.split("\n"):
            text = text.decode('utf-8')  # convert the character encoding.
# print text
width, height = draw.textsize(text, font=txtfont)
# textdraw = ImageDraw.Draw(textimage)
draw.text(((w-width)/2, h), text, font=txtfont, fill=fillcolor)
h+=height+5
#return self.transparent_back(textimage)
def getFont(self,fontpath,size):return ImageFont.truetype(fontpath, size)
def getImage(self,imagefile):return Image.open(imagefile)
def transparent_back(self,img):
img=img.convert('RGBA')
L,H=img.size
color_0=img.getpixel((0,0))
for h in range(H):
for l in range(L):
dot=(l,h)
color_1=img.getpixel(dot)
if color_1==color_0:
color_1=color_1[:-1]+(0,)
img.putpixel(dot,color_1)
return img
if __name__=="__main__":
dev=container(dc=4,angle=0,width=240,height=320)
dev.loadfont(fontname=16,font=ImageFont.truetype(path+"/font/simhei.ttf", 16))
dev.loadfont(fontname=32,font=ImageFont.truetype(path+"/font/simhei.ttf", 32))
data=['如果安装失败,根据提示先把缺失','的包(比如openjpeg)装上']
for i in range(0,18):data.append(str(i))
for i in range(0,len(data)):
o=dev.getunit()
o.type="image"
o.left=0
o.top=i*16
o.font=ImageFont.truetype(path+"/font/simhei.ttf", 16)
o.text=data[i]
o.value=dev.getimagefromtext(o,Image.new('RGB', (320,16),(0,0,0,0)))
dev.layers.append(o)
dev.show()
# dev.start()
|
the-stack_106_25008 | import json
from typing import Any, ClassVar, Dict, List, Optional, Tuple, Union, TYPE_CHECKING
from ..config import Config
from .irresource import IRResource
if TYPE_CHECKING:
from .ir import IR
def qualify_service_name(ir: 'IR', service: str, namespace: Optional[str]) -> str:
fully_qualified = "." in service or "localhost" == service
if namespace != ir.ambassador_namespace and namespace and not fully_qualified and not ir.ambassador_module.use_ambassador_namespace_for_service_resolution:
# The target service name is not fully qualified.
# We are most likely targeting a simple k8s svc with kube-dns resolution.
# Make sure we actually resolve the service it's namespace, not the Ambassador process namespace.
service = f"{service}.{namespace}"
ir.logger.debug("KubernetesServiceResolver use_ambassador_namespace_for_service_resolution %s, fully qualified %s, upstream hostname %s" % (
ir.ambassador_module.use_ambassador_namespace_for_service_resolution,
fully_qualified,
service
))
return service
class IRBaseMapping (IRResource):
group_id: str
host: str
route_weight: List[Union[str, int]]
sni: bool
cached_status: Optional[Dict[str, str]]
status_update: Optional[Dict[str, str]]
def __init__(self, ir: 'IR', aconf: Config,
rkey: str, # REQUIRED
name: str, # REQUIRED
location: str, # REQUIRED
kind: str, # REQUIRED
namespace: Optional[str] = None,
metadata_labels: Optional[Dict[str, str]] = None,
apiVersion: str="getambassador.io/v2",
precedence: int=0,
cluster_tag: Optional[str]=None,
**kwargs) -> None:
# Default status.
self.cached_status = None
self.status_update = None
# Init the superclass...
super().__init__(
ir=ir, aconf=aconf, rkey=rkey, location=location,
kind=kind, name=name, namespace=namespace, metadata_labels=metadata_labels,
apiVersion=apiVersion, precedence=precedence, cluster_tag=cluster_tag,
**kwargs
)
def setup(self, ir: 'IR', aconf: Config) -> bool:
# We assume that any subclass madness is managed already, so we can compute the group ID...
self.group_id = self._group_id()
# ...and the route weight.
self.route_weight = self._route_weight()
# We can also default the resolver, and scream if it doesn't match a resolver we
# know about.
if not self.get('resolver'):
self.resolver = self.ir.ambassador_module.get('resolver', 'kubernetes-service')
resolver = self.ir.get_resolver(self.resolver)
if not resolver:
self.post_error(f'resolver {self.resolver} is unknown!')
return False
self.ir.logger.debug("%s: GID %s route_weight %s, resolver %s" %
(self, self.group_id, self.route_weight, resolver))
# And, of course, we can make sure that the resolver thinks that this Mapping is OK.
if not resolver.valid_mapping(ir, self):
# If there's trouble, the resolver should've already posted about it.
return False
if self.get('circuit_breakers', None) is None:
self['circuit_breakers'] = ir.ambassador_module.circuit_breakers
if self.get('circuit_breakers', None) is not None:
if not self.validate_circuit_breakers(ir, self['circuit_breakers']):
self.post_error("Invalid circuit_breakers specified: {}, invalidating mapping".format(self['circuit_breakers']))
return False
return True
@staticmethod
def validate_circuit_breakers(ir: 'IR', circuit_breakers) -> bool:
if not isinstance(circuit_breakers, (list, tuple)):
return False
for circuit_breaker in circuit_breakers:
if '_name' in circuit_breaker:
# Already reconciled.
ir.logger.debug(f'Breaker validation: good breaker {circuit_breaker["_name"]}')
continue
ir.logger.debug(f'Breaker validation: {json.dumps(circuit_breakers, indent=4, sort_keys=True)}')
name_fields = [ 'cb' ]
if 'priority' in circuit_breaker:
prio = circuit_breaker.get('priority').lower()
if prio not in ['default', 'high']:
return False
name_fields.append(prio[0])
else:
name_fields.append('n')
digit_fields = [ ( 'max_connections', 'c' ),
( 'max_pending_requests', 'p' ),
( 'max_requests', 'r' ),
( 'max_retries', 't' ) ]
for field, abbrev in digit_fields:
if field in circuit_breaker:
try:
value = int(circuit_breaker[field])
name_fields.append(f'{abbrev}{value}')
except ValueError:
return False
circuit_breaker['_name'] = ''.join(name_fields)
ir.logger.debug(f'Breaker valid: {circuit_breaker["_name"]}')
return True
def get_label(self, key: str) -> Optional[str]:
labels = self.get('metadata_labels') or {}
return labels.get(key) or None
def status(self) -> Optional[str]:
"""
Return the new status we should have. Subclasses would typically override
this.
:return: new status (may be None)
"""
return None
def check_status(self) -> None:
crd_name = self.get_label('ambassador_crd')
if not crd_name:
return
# OK, we're supposed to be a CRD. What status do we want, and
# what do we have?
wanted = self.status()
if wanted != self.cached_status:
self.ir.k8s_status_updates[crd_name] = ('Mapping', self.namespace, wanted)
def _group_id(self) -> str:
""" Compute the group ID for this Mapping. Must be defined by subclasses. """
raise NotImplementedError("%s._group_id is not implemented?" % self.__class__.__name__)
def _route_weight(self) -> List[Union[str, int]]:
""" Compute the route weight for this Mapping. Must be defined by subclasses. """
raise NotImplementedError("%s._route_weight is not implemented?" % self.__class__.__name__)
def match_tls_context(self, host: str, ir: 'IR'):
for context in ir.get_tls_contexts():
hosts = context.get('hosts') or []
for context_host in hosts:
if context_host == host:
ir.logger.debug("Matched host {} with TLSContext {}".format(host, context.get('name')))
self.sni = True
return context
return None
|
the-stack_106_25012 | """Support for IOTA wallets."""
from datetime import timedelta
from iota import Iota
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.helpers.entity import Entity
CONF_IRI = "iri"
CONF_TESTNET = "testnet"
CONF_WALLET_NAME = "name"
CONF_WALLET_SEED = "seed"
CONF_WALLETS = "wallets"
DOMAIN = "iota"
IOTA_PLATFORMS = ["sensor"]
SCAN_INTERVAL = timedelta(minutes=10)
WALLET_CONFIG = vol.Schema(
{
vol.Required(CONF_WALLET_NAME): cv.string,
vol.Required(CONF_WALLET_SEED): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_IRI): cv.string,
vol.Optional(CONF_TESTNET, default=False): cv.boolean,
vol.Required(CONF_WALLETS): vol.All(cv.ensure_list, [WALLET_CONFIG]),
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the IOTA component."""
iota_config = config[DOMAIN]
for platform in IOTA_PLATFORMS:
load_platform(hass, platform, DOMAIN, iota_config, config)
return True
class IotaDevice(Entity):
"""Representation of a IOTA device."""
def __init__(self, name, seed, iri, is_testnet=False):
"""Initialise the IOTA device."""
self._name = name
self._seed = seed
self.iri = iri
self.is_testnet = is_testnet
@property
def name(self):
"""Return the default name of the device."""
return self._name
@property
def extra_state_attributes(self):
"""Return the state attributes of the device."""
return {CONF_WALLET_NAME: self._name}
@property
def api(self):
"""Construct API object for interaction with the IRI node."""
return Iota(adapter=self.iri, seed=self._seed)
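# Configuration sketch (hypothetical values): a configuration.yaml entry that
# satisfies CONFIG_SCHEMA above might look like the following, assuming a local
# IRI node at http://localhost:14265 and a seed kept in secrets.yaml:
#
#   iota:
#     iri: http://localhost:14265
#     testnet: false
#     wallets:
#       - name: Main Wallet
#         seed: !secret iota_seed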
|
the-stack_106_25019 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import pytest
import numpy as np
import image_preprocess
from PIL import Image
from paddle.inference import Config
from paddle.inference import PrecisionType
from paddle.inference import create_predictor
from test_src import test_gpu_model_jetson
def inference_ResNet50_vd(img, model_path, params_path):
"""
Run Paddle Inference for the ResNet50_vd model on a single image.
Args:
img: input image as a numpy array (as returned by cv2.imread)
model_path: path to the inference model file
params_path: path to the inference params file
Returns:
results: list of output arrays produced by the Paddle inference predictor
"""
config = Config(model_path, params_path)
config.enable_xpu(10 * 1024 * 1024)
config.enable_lite_engine(PrecisionType.Float32, True)
config.switch_ir_optim(True)
config.switch_use_feed_fetch_ops(False)
config.switch_specify_input_names(True)
config.enable_memory_optim()
predictor = create_predictor(config)
input_names = predictor.get_input_names()
im_size = 224
data = image_preprocess.preprocess(img, im_size)
data_input = [data]
for i, name in enumerate(input_names):
input_tensor = predictor.get_input_handle(name)
input_tensor.reshape(data_input[i].shape)
input_tensor.copy_from_cpu(data_input[i].copy())
# do the inference
predictor.run()
results = []
# get out data from output tensor
output_names = predictor.get_output_names()
for i, name in enumerate(output_names):
output_tensor = predictor.get_output_handle(name)
output_data = output_tensor.copy_to_cpu()
results.append(output_data)
return results
@pytest.mark.p0
def test_ResNet50_vd():
"""
test_ResNet50_vd
Args:
None
Returns:
None
"""
diff_standard = 9e-5
model_name = "ResNet50_vd"
test_model = test_gpu_model_jetson(model_name=model_name)
model_path, params_path = test_model.test_comb_model_path("cv_class_model")
img_name = 'bird.jpeg'
image_path = test_model.test_readdata(
path="cv_class_model", data_name=img_name)
img = cv2.imread(image_path)
with_lr_data = inference_ResNet50_vd(img, model_path, params_path)
npy_result = test_model.npy_result_path("cv_class_model")
test_model.test_diff(npy_result, with_lr_data[0], diff_standard)
# for test
# np.save("ResNet50_vd.npy",with_lr_data[0])
# print(np.argmax(with_lr_data[0][0]))
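# Usage sketch (hypothetical paths): the helper can also be driven outside
# pytest; the model/params filenames below are placeholders, not files shipped
# with this test.
#
#   img = cv2.imread("bird.jpeg")
#   outputs = inference_ResNet50_vd(img, "inference.pdmodel", "inference.pdiparams")
#   print(np.argmax(outputs[0][0]))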
|
the-stack_106_25020 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from model.utils.config import cfg
from model.faster_rcnn.faster_rcnn import _fasterRCNN
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
import torch.utils.model_zoo as model_zoo
import pdb
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth',
'resnet34': 'https://s3.amazonaws.com/pytorch/models/resnet34-333f7ec4.pth',
'resnet50': 'https://s3.amazonaws.com/pytorch/models/resnet50-19c8e357.pth',
'resnet101': 'https://s3.amazonaws.com/pytorch/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://s3.amazonaws.com/pytorch/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True) # change
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
# it is slightly better, though slower, to set stride = 1
# self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
class resnet(_fasterRCNN):
def __init__(self, classes, num_layers=101, pretrained=False, class_agnostic=False):
self.model_path = 'data/pretrained_model/resnet101_caffe.pth'
self.dout_base_model = 1024
self.pretrained = pretrained
self.class_agnostic = class_agnostic
self.num_layers = num_layers
_fasterRCNN.__init__(self, classes, class_agnostic)
def _init_modules(self):
resnet = resnet101()
if self.num_layers == 18:
resnet = resnet18()
if self.num_layers == 34:
resnet = resnet34()
if self.num_layers == 50:
resnet = resnet50()
if self.num_layers == 152:
resnet = resnet152()
if self.pretrained == True:
print("Loading pretrained weights from %s" %(self.model_path))
state_dict = torch.load(self.model_path)
resnet.load_state_dict({k:v for k,v in state_dict.items() if k in resnet.state_dict()})
# Build resnet.
self.RCNN_base = nn.Sequential(resnet.conv1, resnet.bn1,resnet.relu,
resnet.maxpool,resnet.layer1,resnet.layer2,resnet.layer3)
self.RCNN_top = nn.Sequential(resnet.layer4)
self.RCNN_cls_score = nn.Linear(2048, self.n_classes)
if self.class_agnostic:
self.RCNN_bbox_pred = nn.Linear(2048, 4)
else:
self.RCNN_bbox_pred = nn.Linear(2048, 4 * self.n_classes)
# Fix blocks
for p in self.RCNN_base[0].parameters(): p.requires_grad=False
for p in self.RCNN_base[1].parameters(): p.requires_grad=False
assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)
if cfg.RESNET.FIXED_BLOCKS >= 3:
for p in self.RCNN_base[6].parameters(): p.requires_grad=False
if cfg.RESNET.FIXED_BLOCKS >= 2:
for p in self.RCNN_base[5].parameters(): p.requires_grad=False
if cfg.RESNET.FIXED_BLOCKS >= 1:
for p in self.RCNN_base[4].parameters(): p.requires_grad=False
def set_bn_fix(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
for p in m.parameters(): p.requires_grad=False
self.RCNN_base.apply(set_bn_fix)
self.RCNN_top.apply(set_bn_fix)
def train(self, mode=True):
# Override train so that the training mode is set as we want
nn.Module.train(self, mode)
if mode:
# Set fixed blocks to be in eval mode
self.RCNN_base.eval()
self.RCNN_base[5].train()
self.RCNN_base[6].train()
def set_bn_eval(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.eval()
self.RCNN_base.apply(set_bn_eval)
self.RCNN_top.apply(set_bn_eval)
def _head_to_tail(self, pool5):
fc7 = self.RCNN_top(pool5).mean(3).mean(2)
return fc7
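# Usage sketch (hypothetical label set): constructing the backbone for a
# 21-class PASCAL-style setup might look like this; the class-name tuple is
# illustrative only, and the surrounding training loop is assumed to come from
# the rest of the repository.
#
#   classes = ('__background__', 'aeroplane', 'bicycle', ...)  # 21 entries
#   fasterRCNN = resnet(classes, num_layers=101, pretrained=True,
#                       class_agnostic=False)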
|
the-stack_106_25021 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from murano.tests.unit import base
from murano.dsl import murano_object
from murano.engine.system import heat_stack
MOD_NAME = 'murano.engine.system.heat_stack'
class TestHeatStack(base.MuranoTestCase):
def setUp(self):
super(TestHeatStack, self).setUp()
self.mock_murano_obj = mock.Mock(spec=murano_object.MuranoObject)
self.mock_murano_obj.name = 'TestObj'
self.mock_murano_obj.parents = []
@mock.patch('heatclient.client.Client')
def test_push_adds_version(self, mock_heat_client):
"""Assert that if heat_template_version is omitted, it's added."""
# Note that the 'with x as y, a as b:' syntax was introduced in
# python 2.7, and contextlib.nested was deprecated in py2.7
with mock.patch(MOD_NAME + '.HeatStack._get_status') as status_get:
with mock.patch(MOD_NAME + '.HeatStack._wait_state') as wait_st:
status_get.return_value = 'NOT_FOUND'
wait_st.return_value = {}
hs = heat_stack.HeatStack(self.mock_murano_obj,
None, None, None)
hs._heat_client = mock_heat_client
hs._name = 'test-stack'
hs._description = 'Generated by TestHeatStack'
hs._template = {'resources': {'test': 1}}
hs._parameters = {}
hs._applied = False
hs.push()
expected_template = {
'heat_template_version': '2013-05-23',
'description': 'Generated by TestHeatStack',
'resources': {'test': 1}
}
mock_heat_client.stacks.create.assert_called_with(
stack_name='test-stack',
disable_rollback=True,
parameters={},
template=expected_template
)
self.assertTrue(hs._applied)
@mock.patch('heatclient.client.Client')
def test_description_is_optional(self, mock_heat_client):
"""Assert that if heat_template_version is omitted, it's added."""
# Note that the 'with x as y, a as b:' syntax was introduced in
# python 2.7, and contextlib.nested was deprecated in py2.7
with mock.patch(MOD_NAME + '.HeatStack._get_status') as status_get:
with mock.patch(MOD_NAME + '.HeatStack._wait_state') as wait_st:
status_get.return_value = 'NOT_FOUND'
wait_st.return_value = {}
hs = heat_stack.HeatStack(self.mock_murano_obj,
None, None, None)
hs._heat_client = mock_heat_client
hs._name = 'test-stack'
hs._description = None
hs._template = {'resources': {'test': 1}}
hs._parameters = {}
hs._applied = False
hs.push()
expected_template = {
'heat_template_version': '2013-05-23',
'resources': {'test': 1}
}
mock_heat_client.stacks.create.assert_called_with(
stack_name='test-stack',
disable_rollback=True,
parameters={},
template=expected_template
)
self.assertTrue(hs._applied)
def test_update_wrong_template_version(self):
"""Template version other than expected should cause error."""
hs = heat_stack.HeatStack(self.mock_murano_obj,
None, None, None)
hs._name = 'test-stack'
hs._description = 'Generated by TestHeatStack'
hs._template = {'resources': {'test': 1}}
hs.type.properties = {}
erroring_template = {
'heat_template_version': 'something else'
}
with mock.patch(MOD_NAME + '.HeatStack.current') as current:
current.return_value = {}
e = self.assertRaises(heat_stack.HeatStackError,
hs.updateTemplate,
erroring_template)
err_msg = "Currently only heat_template_version 2013-05-23 "\
"is supported."
self.assertEqual(err_msg, str(e))
# Check it's ok without a version
hs.updateTemplate({})
expected = {'resources': {'test': 1}}
self.assertEqual(expected, hs._template)
# .. or with a good version
hs.updateTemplate({'heat_template_version': '2013-05-23'})
expected['heat_template_version'] = '2013-05-23'
self.assertEqual(expected, hs._template)
|
the-stack_106_25022 | """For each HUC12 we are processing, dump our present file database."""
import os
import sys
def main(argv):
"""Go Main Go."""
scenario = int(argv[1])
for line in open("myhucs.txt"):
huc12 = line.strip()
removed = 0
for subdir in [
"crop",
"error",
"man",
"ofe",
"prj",
"run",
"slp",
"sol",
"wb",
"yld",
"env",
"rot",
]:
mydir = "/i/%s/%s/%s/%s" % (scenario, subdir, huc12[:8], huc12[8:])
if not os.path.isdir(mydir):
continue
for _, _, fns in os.walk(mydir):
for fn in fns:
os.unlink("%s/%s" % (mydir, fn))
removed += 1
print(" %s removed %s files" % (huc12, removed))
if __name__ == "__main__":
main(sys.argv)
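# Usage sketch: with a myhucs.txt in the working directory listing one
# 12-character HUC12 per line, scenario 0 would be cleaned via
# (the script filename below is a placeholder):
#
#   python delete_huc12_files.py 0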
|
the-stack_106_25025 | #!/usr/bin/env python3
"""Module used to read devices on a Dallas 1-wire bus with an Embedded
Devices HA7S master.
The class that does the reading of the bus is HA7Sreader.
"""
import logging, time, re, glob
import serial
from . import base_reader
# ----------- Dallas 1-wire CRC Calculations -------------
crc8_table = [
0, 94, 188, 226, 97, 63, 221, 131, 194, 156, 126, 32, 163, 253, 31, 65,
157, 195, 33, 127, 252, 162, 64, 30, 95, 1, 227, 189, 62, 96, 130, 220,
35, 125, 159, 193, 66, 28, 254, 160, 225, 191, 93, 3, 128, 222, 60, 98,
190, 224, 2, 92, 223, 129, 99, 61, 124, 34, 192, 158, 29, 67, 161, 255,
70, 24, 250, 164, 39, 121, 155, 197, 132, 218, 56, 102, 229, 187, 89, 7,
219, 133, 103, 57, 186, 228, 6, 88, 25, 71, 165, 251, 120, 38, 196, 154,
101, 59, 217, 135, 4, 90, 184, 230, 167, 249, 27, 69, 198, 152, 122, 36,
248, 166, 68, 26, 153, 199, 37, 123, 58, 100, 134, 216, 91, 5, 231, 185,
140, 210, 48, 110, 237, 179, 81, 15, 78, 16, 242, 172, 47, 113, 147, 205,
17, 79, 173, 243, 112, 46, 204, 146, 211, 141, 111, 49, 178, 236, 14, 80,
175, 241, 19, 77, 206, 144, 114, 44, 109, 51, 209, 143, 12, 82, 176, 238,
50, 108, 142, 208, 83, 13, 239, 177, 240, 174, 76, 18, 145, 207, 45, 115,
202, 148, 118, 40, 171, 245, 23, 73, 8, 86, 180, 234, 105, 55, 213, 139,
87, 9, 235, 181, 54, 104, 138, 212, 149, 203, 41, 119, 244, 170, 72, 22,
233, 183, 85, 11, 136, 214, 52, 106, 43, 117, 151, 201, 74, 20, 246, 168,
116, 42, 200, 150, 21, 75, 169, 247, 182, 232, 10, 84, 215, 137, 107, 53]
def crc8_is_OK(hex_string):
"""Returns True if the hex_string ending in a CRC byte passes the
Dallas 1-wire CRC8 check.
Code adapted from: https://forum.sparkfun.com/viewtopic.php?p=51145.
"""
# break the Hex string into a list of bytes
byte_list = [int(hex_string[i:i+2], 16) for i in range(0, len(hex_string), 2)]
val = 0
for x in byte_list:
val = crc8_table[val ^ x]
# answer should be 0 if the byte string is valid
return val==0
def crc16_is_OK(hex_string):
"""Returns True if the hex_string ending in two CRC16 bytes passes
the Dallas 1-wire CRC16 check.
Code adapted from: http://forum.arduino.cc/index.php?topic=37648.0;wap2
"""
# break the Hex string into a list of bytes
byte_list = [int(hex_string[i:i+2], 16) for i in range(0, len(hex_string), 2)]
crc = 0
for inbyte in byte_list:
for j in range(8):
mix = (crc ^ inbyte) & 0x01
crc = crc >> 1
if mix:
crc = crc ^ 0xA001
inbyte = inbyte >> 1;
return crc==0xB001
# --------- End CRC Calculations ---------------
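# Worked example (derived from the table above): the Dallas CRC8 of the single
# data byte 0x01 is crc8_table[0x01] == 94 == 0x5E, so a hex string that appends
# that CRC byte passes the check while any other trailing byte fails:
#
#   crc8_is_OK("015E")  # True: crc8_table[0x5E ^ 0x5E] == crc8_table[0] == 0
#   crc8_is_OK("01FF")  # False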
class HA7_port(serial.Serial):
'''Class representing a Serial Port with an HA7S connected to it. Subclass of the
standard pySerial serial port class, so all of the pySerial port methods are
available as well. This class implements some basic methods for communicating
with the HA7S.
'''
def __init__(self, port_name):
'''Open the port using the 'port_name' port, e.g. 'COM29', '/dev/ttyUSB0'.
'''
super(HA7_port, self).__init__(port_name, baudrate=9600, timeout=0.2)
time.sleep(0.5) # HA7S sometimes doesn't wake up fast enough.
def __del__(self):
"""Close port when object is destroyed.
"""
try:
self.close()
except:
pass
def readline(self):
'''Overrides the pySerial readline() method, because that method reads until
a \n is received. Instead, this method reads until a <CR> is received and
returns the results, *not* including the <CR>. Also returns if a read timeout
occurs.
'''
ret = ''
while True:
c = self.read(1)
if len(c)==0 or c=='\r':
return ret
ret += c
def reset(self):
'''Issues a one-wire reset command. Returns nothing.
'''
self.write('R')
self.readline()
regex_dev = re.compile('^[0-9A-F]{16}$')
def device_list(self):
'''Returns a list of devices found on the one-wire bus.
Each item in the returned list is a dictionary with characteristics
of the found device.
'''
# list of devices found
dev_list = []
# find the first device, but try three times if nothing
# shows up.
for i in range(3):
self.flushInput()
self.write('S')
lin = self.readline()
if HA7_port.regex_dev.search(lin):
break
else:
time.sleep(0.4)
while HA7_port.regex_dev.search(lin):
rec = {}
# unreverse the address
rec['addr'] = ''
for i in range(0, 16, 2):
rec['addr'] += lin[14-i:14-i+2]
# only add this to the list if the CRC passes
if crc8_is_OK(rec['addr']):
rec['family'] = lin[-2:] # family code
# make the id in the format used by the BMS system
rec['id'] = '%s.%s' % (rec['family'], rec['addr'][2:-2])
dev_list.append(rec)
self.write('s')
lin = self.readline()
return dev_list
def write_bytes(self, hex_str):
'''Uses the 'W' command of the HA7S to write bytes to the one-wire
bus. 'hex_str' are the bytes to be written, expressed in HEX format,
all caps. The function returns all of the bytes, as a HEX string, read
from the bus up to but not including the terminating <CR>.
'''
# Determine the length of the string to be written, expressed
# in HEX, two digits, without leading 0x.
byte_len = hex(len(hex_str) // 2)[2:].upper()
if len(byte_len)==1:
byte_len = '0' + byte_len
self.write('W%s%s\r' % (byte_len, hex_str))
return self.readline()
class HA7Sreader(base_reader.Reader):
"""Class that reads the sensors on a 1-wire bus with a HA7S master.
The read() method performs the read action.
"""
def __init__(self, settings=None):
"""'settings' is the general settings file for the application.
"""
# Call constructor of base class
super(HA7Sreader, self).__init__(settings)
# find the FTDI port that connects to the HA7S, and then
# remove it from the list of available FTDI ports.
# Go through this process three times in case it doesn't
# find a port on the first pass (I've seen the 'S' function
# fail).
# NOTE: this process depends on there being at least one 1-wire
# device connected to the bus.
self.port_path = None
for i in range(3):
for p_path in base_reader.Reader.available_ftdi_ports:
try:
port = HA7_port(p_path)
port.flushInput()
port.write('S') # searches for first device
res = port.readline()
if HA7_port.regex_dev.search(res):
self.port_path = p_path
# remove port from the available list
base_reader.Reader.available_ftdi_ports.remove(p_path)
break
except:
pass
finally:
port.close()
# if arrived here due to success, break out of the 3 times loop
if self.port_path:
break
regex_temp = re.compile('^[0-9A-F]{18}$')
def readTemp(self, port, addr):
'''Returns the temperature in Fahrenheit read from the Dallas
DS18B20 with the 16 hex digit ROM code of 'addr'. 'port' is an open
HA7_port port.
**Note: this method assumes the Temperature Convert command (0x44)
has already been issued and completed.
'''
port.reset()
port.write_bytes('55%sBE' % addr)
ret = port.write_bytes('FF' * 9) # read entire Scratchpad in order to enable CRC check
# Make sure return value from device is valid.
if HA7Sreader.regex_temp.search(ret):
# Ensure CRC is OK
if not crc8_is_OK(ret):
raise Exception('Bad CRC calculating temperature for %s. Return bytes were: %s' % (addr, ret))
sign = ret[2] # the sign HEX digit
val = int(ret[3] + ret[:2], 16)
if sign=='F':
# this is a negative temperature expressed in
# 12-bit twos complement
val = val - (1<<12)
# vals is now 1/16ths of a degree C, so convert to F
return 32.0 + 1.8 * val/16.0
else:
raise Exception('Bad 1-wire Temperature Return Value: %s' % ret)
regex_io = re.compile('^[0-9A-F]{8}$')
def readIO(self, port, addr):
"""Returns the sensed level of IO channel A on a DS2406 device.
'port' is an open HA7_port.
'addr' is the 16 hex digit ROM code of the DS2406, using capital
letters for the non-numeric hex codes.
"""
port.reset()
cmd = 'F5C5FF'
port.write_bytes('55%s%s' % (addr, cmd))
# reads Channel Info byte, another Unknown byte, + two CRC16 bytes
ret = port.write_bytes('FF'*4)
if HA7Sreader.regex_io.search(ret):
if not crc16_is_OK(cmd + ret):
raise Exception('Bad CRC reading Input for %s. Return bytes were: %s' % (addr, ret))
val = int(ret[:2], 16) & 4
return 1 if val else 0
else:
raise Exception('Bad 1-wire DS2406 Return Value: %s' % ret)
def read(self):
"""Read the 1-wire sensors attached to the HA7S.
Only certain sensor families are supported: 28 (DS18B20) and 12 (DS2406)
Returns a list of readings. The reading list consists of
4-tuples of the form (UNIX timestamp in seconds, reading ID string,
reading value, reading type which is a value from the 'base_reader' module.
"""
if self.port_path:
port = HA7_port(self.port_path)
else:
raise Exception('No HA7S connected.')
try: # for making sure port is closed
# the reading list to return.
readings = []
# Use the same timestamp for all the readings
ts = time.time()
# Get list of connected devices and issue Temperature Convert command
# for all devices if any of them are DS18B20s.
devices = port.device_list()
temp_device_ct = len([rec for rec in devices if rec['family']=='28'])
if temp_device_ct > 19:
# HA7S can only supply 35 mA
raise Exception('Too many one-wire temperature devices on bus.')
if temp_device_ct:
port.reset()
port.write_bytes('CC44') # issues temperature convert to all temp devices
time.sleep(1.0) # long enough for convert to occur.
port.flushInput()
for rec in devices:
try:
if rec['family']=='28':
val = self.readTemp(port, rec['addr'])
read_type = base_reader.VALUE
elif rec['family']=='12':
val = self.readIO(port, rec['addr'])
read_type = base_reader.STATE
readings.append( (ts, rec['id'], val, read_type) )
except:
logging.exception('Error reading 1-wire sensor %s' % rec['id'])
finally:
port.close()
return readings
if __name__=='__main__':
from pprint import pprint
reader = HA7Sreader()
pprint(reader.read())
|
the-stack_106_25026 | import json
import sys
from django.core.serializers.base import DeserializationError
from django.core.serializers.json import Serializer as JSONSerializer
from djmoney.money import Money
from .models.fields import MoneyField
from .utils import get_currency_field_name
Serializer = JSONSerializer
def Deserializer(stream_or_string, **options): # noqa
"""
Deserialize a stream or string of JSON data.
"""
# Local imports to allow using modified versions of `_get_model`
# It could be patched in runtime via `unittest.mock.patch` for example
from django.core.serializers.python import Deserializer as PythonDeserializer, _get_model
ignore = options.pop("ignorenonexistent", False)
if not isinstance(stream_or_string, (bytes, str)):
stream_or_string = stream_or_string.read()
if isinstance(stream_or_string, bytes):
stream_or_string = stream_or_string.decode("utf-8")
try:
for obj in json.loads(stream_or_string):
try:
Model = _get_model(obj["model"])
except DeserializationError:
if ignore:
continue
else:
raise
money_fields = {}
fields = {}
field_names = {field.name for field in Model._meta.get_fields()}
for (field_name, field_value) in obj["fields"].items():
if ignore and field_name not in field_names:
# skip fields no longer on model
continue
field = Model._meta.get_field(field_name)
if isinstance(field, MoneyField) and field_value is not None:
money_fields[field_name] = Money(
field_value, obj["fields"][get_currency_field_name(field_name, field)]
)
else:
fields[field_name] = field_value
obj["fields"] = fields
for inner_obj in PythonDeserializer([obj], **options):
for field, value in money_fields.items():
setattr(inner_obj.object, field, value)
yield inner_obj
except (GeneratorExit, DeserializationError):
raise
except Exception as exc:
raise DeserializationError.with_traceback(DeserializationError(exc), sys.exc_info()[2])
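# Usage sketch (assumptions flagged): with this module wired up as Django's
# "json" serializer (django-money's documentation suggests
# SERIALIZATION_MODULES = {"json": "djmoney.serializers"}; treat that setting
# as an assumption here), a round trip over a model with a MoneyField might
# look like the following. `Offer` is a hypothetical model, not part of this
# package.
#
#   from django.core import serializers
#   payload = serializers.serialize("json", Offer.objects.all())
#   for wrapper in serializers.deserialize("json", payload):
#       wrapper.save()  # Money values are rebuilt from amount + currency field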
|
the-stack_106_25030 | # -*- test-case-name: twisted.conch.test.test_keys -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Handling of RSA, DSA, ECDSA, and Ed25519 keys.
"""
from __future__ import absolute_import, division
import binascii
import itertools
from hashlib import md5, sha256
import base64
import struct
import warnings
import bcrypt
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import (
dsa, ec, ed25519, padding, rsa)
from cryptography.hazmat.primitives.serialization import (
load_pem_private_key, load_ssh_public_key)
from cryptography import utils
try:
from cryptography.hazmat.primitives.asymmetric.utils import (
encode_dss_signature, decode_dss_signature)
except ImportError:
from cryptography.hazmat.primitives.asymmetric.utils import (
encode_rfc6979_signature as encode_dss_signature,
decode_rfc6979_signature as decode_dss_signature)
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from pyasn1.error import PyAsn1Error
from pyasn1.type import univ
from pyasn1.codec.ber import decoder as berDecoder
from pyasn1.codec.ber import encoder as berEncoder
from twisted.conch.ssh import common, sexpy
from twisted.conch.ssh.common import int_from_bytes, int_to_bytes
from twisted.python import randbytes
from twisted.python.compat import (
iterbytes, long, izip, nativeString, unicode, _PY3,
_b64decodebytes as decodebytes, _b64encodebytes as encodebytes,
_bytesChr as chr)
from twisted.python.constants import NamedConstant, Names
from twisted.python.deprecate import _mutuallyExclusiveArguments
# Curve lookup table
_curveTable = {
b'ecdsa-sha2-nistp256': ec.SECP256R1(),
b'ecdsa-sha2-nistp384': ec.SECP384R1(),
b'ecdsa-sha2-nistp521': ec.SECP521R1(),
}
_secToNist = {
b'secp256r1' : b'nistp256',
b'secp384r1' : b'nistp384',
b'secp521r1' : b'nistp521',
}
class BadKeyError(Exception):
"""
Raised when a key isn't what we expected from it.
XXX: we really need to check for bad keys
"""
class EncryptedKeyError(Exception):
"""
Raised when an encrypted key is presented to fromString/fromFile without
a password.
"""
class BadFingerPrintFormat(Exception):
"""
Raised when an unsupported fingerprint format is presented to fingerprint().
"""
class FingerprintFormats(Names):
"""
Constants representing the supported formats of key fingerprints.
@cvar MD5_HEX: Named constant representing fingerprint format generated
using md5[RFC1321] algorithm in hexadecimal encoding.
@type MD5_HEX: L{twisted.python.constants.NamedConstant}
@cvar SHA256_BASE64: Named constant representing fingerprint format
generated using sha256[RFC4634] algorithm in base64 encoding
@type SHA256_BASE64: L{twisted.python.constants.NamedConstant}
"""
MD5_HEX = NamedConstant()
SHA256_BASE64 = NamedConstant()
class Key(object):
"""
An object representing a key. A key can be either a public or
private key. A public key can verify a signature; a private key can
create or verify a signature. To generate a string that can be stored
on disk, use the toString method. If you have a private key, but want
the string representation of the public key, use Key.public().toString().
"""
@classmethod
def fromFile(cls, filename, type=None, passphrase=None):
"""
Load a key from a file.
@param filename: The path to load key data from.
@type type: L{str} or L{None}
@param type: A string describing the format the key data is in, or
L{None} to attempt detection of the type.
@type passphrase: L{bytes} or L{None}
@param passphrase: The passphrase the key is encrypted with, or L{None}
if there is no encryption.
@rtype: L{Key}
@return: The loaded key.
"""
with open(filename, 'rb') as f:
return cls.fromString(f.read(), type, passphrase)
@classmethod
def fromString(cls, data, type=None, passphrase=None):
"""
Return a Key object corresponding to the string data.
type is optionally the type of string, matching a _fromString_*
method. Otherwise, the _guessStringType() classmethod will be used
to guess a type. If the key is encrypted, passphrase is used as
the decryption key.
@type data: L{bytes}
@param data: The key data.
@type type: L{str} or L{None}
@param type: A string describing the format the key data is in, or
L{None} to attempt detection of the type.
@type passphrase: L{bytes} or L{None}
@param passphrase: The passphrase the key is encrypted with, or L{None}
if there is no encryption.
@rtype: L{Key}
@return: The loaded key.
"""
if isinstance(data, unicode):
data = data.encode("utf-8")
if isinstance(passphrase, unicode):
passphrase = passphrase.encode("utf-8")
if type is None:
type = cls._guessStringType(data)
if type is None:
raise BadKeyError('cannot guess the type of %r' % (data,))
method = getattr(cls, '_fromString_%s' % (type.upper(),), None)
if method is None:
raise BadKeyError('no _fromString method for %s' % (type,))
if method.__code__.co_argcount == 2: # No passphrase
if passphrase:
raise BadKeyError('key not encrypted')
return method(data)
else:
return method(data, passphrase)
@classmethod
def _fromString_BLOB(cls, blob):
"""
Return a public key object corresponding to this public key blob.
The format of a RSA public key blob is::
string 'ssh-rsa'
integer e
integer n
The format of a DSA public key blob is::
string 'ssh-dss'
integer p
integer q
integer g
integer y
The format of ECDSA-SHA2-* public key blob is::
string 'ecdsa-sha2-[identifier]'
integer x
integer y
identifier is the standard NIST curve name.
The format of an Ed25519 public key blob is::
string 'ssh-ed25519'
string a
@type blob: L{bytes}
@param blob: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if the key type (the first string) is unknown.
"""
keyType, rest = common.getNS(blob)
if keyType == b'ssh-rsa':
e, n, rest = common.getMP(rest, 2)
return cls(
rsa.RSAPublicNumbers(e, n).public_key(default_backend()))
elif keyType == b'ssh-dss':
p, q, g, y, rest = common.getMP(rest, 4)
return cls(
dsa.DSAPublicNumbers(
y=y,
parameter_numbers=dsa.DSAParameterNumbers(
p=p,
q=q,
g=g
)
).public_key(default_backend())
)
elif keyType in _curveTable:
return cls(
ec.EllipticCurvePublicKey.from_encoded_point(
_curveTable[keyType], common.getNS(rest, 2)[1]
)
)
elif keyType == b'ssh-ed25519':
a, rest = common.getNS(rest)
return cls._fromEd25519Components(a)
else:
raise BadKeyError('unknown blob type: %s' % (keyType,))
@classmethod
def _fromString_PRIVATE_BLOB(cls, blob):
"""
Return a private key object corresponding to this private key blob.
The blob formats are as follows:
RSA keys::
string 'ssh-rsa'
integer n
integer e
integer d
integer u
integer p
integer q
DSA keys::
string 'ssh-dss'
integer p
integer q
integer g
integer y
integer x
EC keys::
string 'ecdsa-sha2-[identifier]'
string identifier
string q
integer privateValue
identifier is the standard NIST curve name.
Ed25519 keys::
string 'ssh-ed25519'
string a
string k || a
@type blob: L{bytes}
@param blob: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if
* the key type (the first string) is unknown
* the curve name of an ECDSA key does not match the key type
"""
keyType, rest = common.getNS(blob)
if keyType == b'ssh-rsa':
n, e, d, u, p, q, rest = common.getMP(rest, 6)
return cls._fromRSAComponents(n=n, e=e, d=d, p=p, q=q)
elif keyType == b'ssh-dss':
p, q, g, y, x, rest = common.getMP(rest, 5)
return cls._fromDSAComponents(y=y, g=g, p=p, q=q, x=x)
elif keyType in _curveTable:
curve = _curveTable[keyType]
curveName, q, rest = common.getNS(rest, 2)
if curveName != _secToNist[curve.name.encode('ascii')]:
raise BadKeyError('ECDSA curve name %r does not match key '
'type %r' % (curveName, keyType))
privateValue, rest = common.getMP(rest)
return cls._fromECEncodedPoint(
encodedPoint=q, curve=keyType, privateValue=privateValue)
elif keyType == b'ssh-ed25519':
# OpenSSH's format repeats the public key bytes for some reason.
# We're only interested in the private key here anyway.
a, combined, rest = common.getNS(rest, 2)
k = combined[:32]
return cls._fromEd25519Components(a, k=k)
else:
raise BadKeyError('unknown blob type: %s' % (keyType,))
@classmethod
def _fromString_PUBLIC_OPENSSH(cls, data):
"""
Return a public key object corresponding to this OpenSSH public key
string. The format of an OpenSSH public key string is::
<key type> <base64-encoded public key blob>
@type data: L{bytes}
@param data: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if the blob type is unknown.
"""
# ECDSA keys don't need base64 decoding which is required
# for RSA or DSA keys.
if data.startswith(b'ecdsa-sha2'):
return cls(load_ssh_public_key(data, default_backend()))
blob = decodebytes(data.split()[1])
return cls._fromString_BLOB(blob)
@classmethod
def _fromPrivateOpenSSH_v1(cls, data, passphrase):
"""
Return a private key object corresponding to this OpenSSH private key
string, in the "openssh-key-v1" format introduced in OpenSSH 6.5.
The format of an openssh-key-v1 private key string is::
-----BEGIN OPENSSH PRIVATE KEY-----
<base64-encoded SSH protocol string>
-----END OPENSSH PRIVATE KEY-----
The SSH protocol string is as described in
U{PROTOCOL.key<https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.key>}.
@type data: L{bytes}
@param data: The key data.
@type passphrase: L{bytes} or L{None}
@param passphrase: The passphrase the key is encrypted with, or L{None}
if it is not encrypted.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if
* a passphrase is provided for an unencrypted key
* the SSH protocol encoding is incorrect
@raises EncryptedKeyError: if
* a passphrase is not provided for an encrypted key
"""
lines = data.strip().splitlines()
keyList = decodebytes(b''.join(lines[1:-1]))
if not keyList.startswith(b'openssh-key-v1\0'):
raise BadKeyError('unknown OpenSSH private key format')
keyList = keyList[len(b'openssh-key-v1\0'):]
cipher, kdf, kdfOptions, rest = common.getNS(keyList, 3)
n = struct.unpack('!L', rest[:4])[0]
if n != 1:
raise BadKeyError('only OpenSSH private key files containing '
'a single key are supported')
# Ignore public key
_, encPrivKeyList, _ = common.getNS(rest[4:], 2)
if cipher != b'none':
if not passphrase:
raise EncryptedKeyError('Passphrase must be provided '
'for an encrypted key')
# Determine cipher
if cipher in (b'aes128-ctr', b'aes192-ctr', b'aes256-ctr'):
algorithmClass = algorithms.AES
blockSize = 16
keySize = int(cipher[3:6]) // 8
ivSize = blockSize
else:
raise BadKeyError('unknown encryption type %r' % (cipher,))
if kdf == b'bcrypt':
salt, rest = common.getNS(kdfOptions)
rounds = struct.unpack('!L', rest[:4])[0]
decKey = bcrypt.kdf(
passphrase, salt, keySize + ivSize, rounds,
# We can only use the number of rounds that OpenSSH used.
ignore_few_rounds=True)
else:
raise BadKeyError('unknown KDF type %r' % (kdf,))
if (len(encPrivKeyList) % blockSize) != 0:
raise BadKeyError('bad padding')
decryptor = Cipher(
algorithmClass(decKey[:keySize]),
modes.CTR(decKey[keySize:keySize + ivSize]),
backend=default_backend()
).decryptor()
privKeyList = (
decryptor.update(encPrivKeyList) + decryptor.finalize())
else:
if kdf != b'none':
raise BadKeyError('private key specifies KDF %r but no '
'cipher' % (kdf,))
privKeyList = encPrivKeyList
check1 = struct.unpack('!L', privKeyList[:4])[0]
check2 = struct.unpack('!L', privKeyList[4:8])[0]
if check1 != check2:
raise BadKeyError('check values do not match: %d != %d' %
(check1, check2))
return cls._fromString_PRIVATE_BLOB(privKeyList[8:])
@classmethod
def _fromPrivateOpenSSH_PEM(cls, data, passphrase):
"""
Return a private key object corresponding to this OpenSSH private key
string, in the old PEM-based format.
The format of a PEM-based OpenSSH private key string is::
-----BEGIN <key type> PRIVATE KEY-----
[Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,<initialization value>]
<base64-encoded ASN.1 structure>
------END <key type> PRIVATE KEY------
The ASN.1 structure of a RSA key is::
(0, n, e, d, p, q)
The ASN.1 structure of a DSA key is::
(0, p, q, g, y, x)
The ASN.1 structure of a ECDSA key is::
(ECParameters, OID, NULL)
@type data: L{bytes}
@param data: The key data.
@type passphrase: L{bytes} or L{None}
@param passphrase: The passphrase the key is encrypted with, or L{None}
if it is not encrypted.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if
* a passphrase is provided for an unencrypted key
* the ASN.1 encoding is incorrect
@raises EncryptedKeyError: if
* a passphrase is not provided for an encrypted key
"""
lines = data.strip().splitlines()
kind = lines[0][11:-17]
if lines[1].startswith(b'Proc-Type: 4,ENCRYPTED'):
if not passphrase:
raise EncryptedKeyError('Passphrase must be provided '
'for an encrypted key')
# Determine cipher and initialization vector
try:
_, cipherIVInfo = lines[2].split(b' ', 1)
cipher, ivdata = cipherIVInfo.rstrip().split(b',', 1)
except ValueError:
raise BadKeyError('invalid DEK-info %r' % (lines[2],))
if cipher in (b'AES-128-CBC', b'AES-256-CBC'):
algorithmClass = algorithms.AES
keySize = int(cipher.split(b'-')[1]) // 8
if len(ivdata) != 32:
raise BadKeyError('AES encrypted key with a bad IV')
elif cipher == b'DES-EDE3-CBC':
algorithmClass = algorithms.TripleDES
keySize = 24
if len(ivdata) != 16:
raise BadKeyError('DES encrypted key with a bad IV')
else:
raise BadKeyError('unknown encryption type %r' % (cipher,))
# Extract keyData for decoding
iv = bytes(bytearray([int(ivdata[i:i + 2], 16)
for i in range(0, len(ivdata), 2)]))
ba = md5(passphrase + iv[:8]).digest()
bb = md5(ba + passphrase + iv[:8]).digest()
decKey = (ba + bb)[:keySize]
b64Data = decodebytes(b''.join(lines[3:-1]))
decryptor = Cipher(
algorithmClass(decKey),
modes.CBC(iv),
backend=default_backend()
).decryptor()
keyData = decryptor.update(b64Data) + decryptor.finalize()
removeLen = ord(keyData[-1:])
keyData = keyData[:-removeLen]
else:
b64Data = b''.join(lines[1:-1])
keyData = decodebytes(b64Data)
try:
decodedKey = berDecoder.decode(keyData)[0]
except PyAsn1Error as e:
raise BadKeyError(
'Failed to decode key (Bad Passphrase?): %s' % (e,))
if kind == b'EC':
return cls(
load_pem_private_key(data, passphrase, default_backend()))
if kind == b'RSA':
if len(decodedKey) == 2: # Alternate RSA key
decodedKey = decodedKey[0]
if len(decodedKey) < 6:
raise BadKeyError('RSA key failed to decode properly')
n, e, d, p, q, dmp1, dmq1, iqmp = [
long(value) for value in decodedKey[1:9]
]
return cls(
rsa.RSAPrivateNumbers(
p=p,
q=q,
d=d,
dmp1=dmp1,
dmq1=dmq1,
iqmp=iqmp,
public_numbers=rsa.RSAPublicNumbers(e=e, n=n),
).private_key(default_backend())
)
elif kind == b'DSA':
p, q, g, y, x = [long(value) for value in decodedKey[1: 6]]
if len(decodedKey) < 6:
raise BadKeyError('DSA key failed to decode properly')
return cls(
dsa.DSAPrivateNumbers(
x=x,
public_numbers=dsa.DSAPublicNumbers(
y=y,
parameter_numbers=dsa.DSAParameterNumbers(
p=p,
q=q,
g=g
)
)
).private_key(backend=default_backend())
)
else:
raise BadKeyError("unknown key type %s" % (kind,))
@classmethod
def _fromString_PRIVATE_OPENSSH(cls, data, passphrase):
"""
Return a private key object corresponding to this OpenSSH private key
string. If the key is encrypted, passphrase MUST be provided.
Providing a passphrase for an unencrypted key is an error.
@type data: L{bytes}
@param data: The key data.
@type passphrase: L{bytes} or L{None}
@param passphrase: The passphrase the key is encrypted with, or L{None}
if it is not encrypted.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if
* a passphrase is provided for an unencrypted key
* the encoding is incorrect
@raises EncryptedKeyError: if
* a passphrase is not provided for an encrypted key
"""
if data.strip().splitlines()[0][11:-17] == b'OPENSSH':
# New-format (openssh-key-v1) key
return cls._fromPrivateOpenSSH_v1(data, passphrase)
else:
# Old-format (PEM) key
return cls._fromPrivateOpenSSH_PEM(data, passphrase)
@classmethod
def _fromString_PUBLIC_LSH(cls, data):
"""
Return a public key corresponding to this LSH public key string.
The LSH public key string format is::
<s-expression: ('public-key', (<key type>, (<name, <value>)+))>
The names for a RSA (key type 'rsa-pkcs1-sha1') key are: n, e.
The names for a DSA (key type 'dsa') key are: y, g, p, q.
@type data: L{bytes}
@param data: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if the key type is unknown
"""
sexp = sexpy.parse(decodebytes(data[1:-1]))
assert sexp[0] == b'public-key'
kd = {}
for name, data in sexp[1][1:]:
kd[name] = common.getMP(common.NS(data))[0]
if sexp[1][0] == b'dsa':
return cls._fromDSAComponents(
y=kd[b'y'], g=kd[b'g'], p=kd[b'p'], q=kd[b'q'])
elif sexp[1][0] == b'rsa-pkcs1-sha1':
return cls._fromRSAComponents(n=kd[b'n'], e=kd[b'e'])
else:
raise BadKeyError('unknown lsh key type %s' % (sexp[1][0],))
@classmethod
def _fromString_PRIVATE_LSH(cls, data):
"""
Return a private key corresponding to this LSH private key string.
The LSH private key string format is::
<s-expression: ('private-key', (<key type>, (<name>, <value>)+))>
The names for a RSA (key type 'rsa-pkcs1-sha1') key are: n, e, d, p, q.
The names for a DSA (key type 'dsa') key are: y, g, p, q, x.
@type data: L{bytes}
@param data: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if the key type is unknown
"""
sexp = sexpy.parse(data)
assert sexp[0] == b'private-key'
kd = {}
for name, data in sexp[1][1:]:
kd[name] = common.getMP(common.NS(data))[0]
if sexp[1][0] == b'dsa':
assert len(kd) == 5, len(kd)
return cls._fromDSAComponents(
y=kd[b'y'], g=kd[b'g'], p=kd[b'p'], q=kd[b'q'], x=kd[b'x'])
elif sexp[1][0] == b'rsa-pkcs1':
assert len(kd) == 8, len(kd)
if kd[b'p'] > kd[b'q']: # Make p smaller than q
kd[b'p'], kd[b'q'] = kd[b'q'], kd[b'p']
return cls._fromRSAComponents(
n=kd[b'n'], e=kd[b'e'], d=kd[b'd'], p=kd[b'p'], q=kd[b'q'])
else:
raise BadKeyError('unknown lsh key type %s' % (sexp[1][0],))
@classmethod
def _fromString_AGENTV3(cls, data):
"""
Return a private key object corresponding to the Secure Shell Key
Agent v3 format.
The SSH Key Agent v3 format for a RSA key is::
string 'ssh-rsa'
integer e
integer d
integer n
integer u
integer p
integer q
The SSH Key Agent v3 format for a DSA key is::
string 'ssh-dss'
integer p
integer q
integer g
integer y
integer x
@type data: L{bytes}
@param data: The key data.
@return: A new key.
@rtype: L{twisted.conch.ssh.keys.Key}
@raises BadKeyError: if the key type (the first string) is unknown
"""
keyType, data = common.getNS(data)
if keyType == b'ssh-dss':
p, data = common.getMP(data)
q, data = common.getMP(data)
g, data = common.getMP(data)
y, data = common.getMP(data)
x, data = common.getMP(data)
return cls._fromDSAComponents(y=y, g=g, p=p, q=q, x=x)
elif keyType == b'ssh-rsa':
e, data = common.getMP(data)
d, data = common.getMP(data)
n, data = common.getMP(data)
u, data = common.getMP(data)
p, data = common.getMP(data)
q, data = common.getMP(data)
return cls._fromRSAComponents(n=n, e=e, d=d, p=p, q=q, u=u)
else:
raise BadKeyError("unknown key type %s" % (keyType,))
@classmethod
def _guessStringType(cls, data):
"""
Guess the type of key in data. The types map to _fromString_*
methods.
@type data: L{bytes}
@param data: The key data.
"""
if data.startswith(b'ssh-') or data.startswith(b'ecdsa-sha2-'):
return 'public_openssh'
elif data.startswith(b'-----BEGIN'):
return 'private_openssh'
elif data.startswith(b'{'):
return 'public_lsh'
elif data.startswith(b'('):
return 'private_lsh'
elif (data.startswith(b'\x00\x00\x00\x07ssh-') or
data.startswith(b'\x00\x00\x00\x13ecdsa-') or
data.startswith(b'\x00\x00\x00\x0bssh-ed25519')):
ignored, rest = common.getNS(data)
count = 0
while rest:
count += 1
ignored, rest = common.getMP(rest)
if count > 4:
return 'agentv3'
else:
return 'blob'
@classmethod
def _fromRSAComponents(cls, n, e, d=None, p=None, q=None, u=None):
"""
Build a key from RSA numerical components.
@type n: L{int}
@param n: The 'n' RSA variable.
@type e: L{int}
@param e: The 'e' RSA variable.
@type d: L{int} or L{None}
@param d: The 'd' RSA variable (optional for a public key).
@type p: L{int} or L{None}
@param p: The 'p' RSA variable (optional for a public key).
@type q: L{int} or L{None}
@param q: The 'q' RSA variable (optional for a public key).
@type u: L{int} or L{None}
@param u: The 'u' RSA variable. Ignored, as its value is determined by
p and q.
@rtype: L{Key}
@return: An RSA key constructed from the values as given.
"""
publicNumbers = rsa.RSAPublicNumbers(e=e, n=n)
if d is None:
# We have public components.
keyObject = publicNumbers.public_key(default_backend())
else:
privateNumbers = rsa.RSAPrivateNumbers(
p=p,
q=q,
d=d,
dmp1=rsa.rsa_crt_dmp1(d, p),
dmq1=rsa.rsa_crt_dmq1(d, q),
iqmp=rsa.rsa_crt_iqmp(p, q),
public_numbers=publicNumbers,
)
keyObject = privateNumbers.private_key(default_backend())
return cls(keyObject)
@classmethod
def _fromDSAComponents(cls, y, p, q, g, x=None):
"""
Build a key from DSA numerical components.
@type y: L{int}
@param y: The 'y' DSA variable.
@type p: L{int}
@param p: The 'p' DSA variable.
@type q: L{int}
@param q: The 'q' DSA variable.
@type g: L{int}
@param g: The 'g' DSA variable.
@type x: L{int} or L{None}
@param x: The 'x' DSA variable (optional for a public key)
@rtype: L{Key}
@return: A DSA key constructed from the values as given.
"""
publicNumbers = dsa.DSAPublicNumbers(
y=y, parameter_numbers=dsa.DSAParameterNumbers(p=p, q=q, g=g))
if x is None:
# We have public components.
keyObject = publicNumbers.public_key(default_backend())
else:
privateNumbers = dsa.DSAPrivateNumbers(
x=x, public_numbers=publicNumbers)
keyObject = privateNumbers.private_key(default_backend())
return cls(keyObject)
@classmethod
def _fromECComponents(cls, x, y, curve, privateValue=None):
"""
Build a key from EC components.
@param x: The affine x component of the public point used for verifying.
@type x: L{int}
@param y: The affine y component of the public point used for verifying.
@type y: L{int}
@param curve: NIST name of elliptic curve.
@type curve: L{bytes}
@param privateValue: The private value.
@type privateValue: L{int}
"""
publicNumbers = ec.EllipticCurvePublicNumbers(
x=x, y=y, curve=_curveTable[curve])
if privateValue is None:
# We have public components.
keyObject = publicNumbers.public_key(default_backend())
else:
privateNumbers = ec.EllipticCurvePrivateNumbers(
private_value=privateValue, public_numbers=publicNumbers)
keyObject = privateNumbers.private_key(default_backend())
return cls(keyObject)
@classmethod
def _fromECEncodedPoint(cls, encodedPoint, curve, privateValue=None):
"""
Build a key from an EC encoded point.
@param encodedPoint: The public point encoded as in SEC 1 v2.0
section 2.3.3.
@type encodedPoint: L{bytes}
@param curve: NIST name of elliptic curve.
@type curve: L{bytes}
@param privateValue: The private value.
@type privateValue: L{int}
"""
if privateValue is None:
# We have public components.
keyObject = ec.EllipticCurvePublicKey.from_encoded_point(
_curveTable[curve], encodedPoint
)
else:
keyObject = ec.derive_private_key(
privateValue, _curveTable[curve], default_backend()
)
return cls(keyObject)
@classmethod
def _fromEd25519Components(cls, a, k=None):
"""Build a key from Ed25519 components.
@param a: The Ed25519 public key, as defined in RFC 8032 section
5.1.5.
@type a: L{bytes}
@param k: The Ed25519 private key, as defined in RFC 8032 section
5.1.5.
@type k: L{bytes}
"""
if k is None:
keyObject = ed25519.Ed25519PublicKey.from_public_bytes(a)
else:
keyObject = ed25519.Ed25519PrivateKey.from_private_bytes(k)
return cls(keyObject)
def __init__(self, keyObject):
"""
Initialize with a private or public
C{cryptography.hazmat.primitives.asymmetric} key.
@param keyObject: Low level key.
@type keyObject: C{cryptography.hazmat.primitives.asymmetric} key.
"""
self._keyObject = keyObject
def __eq__(self, other):
"""
Return True if other represents an object with the same key.
"""
if type(self) == type(other):
return self.type() == other.type() and self.data() == other.data()
else:
return NotImplemented
def __ne__(self, other):
"""
Return True if other represents anything other than this key.
"""
result = self.__eq__(other)
if result == NotImplemented:
return result
return not result
def __repr__(self):
"""
Return a pretty representation of this object.
"""
if self.type() == 'EC':
data = self.data()
name = data['curve'].decode('utf-8')
if self.isPublic():
out = '<Elliptic Curve Public Key (%s bits)' % (name[-3:],)
else:
out = '<Elliptic Curve Private Key (%s bits)' % (name[-3:],)
for k, v in sorted(data.items()):
if _PY3 and k == 'curve':
out += "\ncurve:\n\t%s" % (name,)
else:
out += "\n%s:\n\t%s" % (k, v)
return out + ">\n"
else:
lines = [
'<%s %s (%s bits)' % (
nativeString(self.type()),
self.isPublic() and 'Public Key' or 'Private Key',
self.size())]
for k, v in sorted(self.data().items()):
lines.append('attr %s:' % (k,))
by = v if self.type() == 'Ed25519' else common.MP(v)[4:]
while by:
m = by[:15]
by = by[15:]
o = ''
for c in iterbytes(m):
o = o + '%02x:' % (ord(c),)
if len(m) < 15:
o = o[:-1]
lines.append('\t' + o)
lines[-1] = lines[-1] + '>'
return '\n'.join(lines)
def isPublic(self):
"""
Check if this instance is a public key.
@return: C{True} if this is a public key.
"""
return isinstance(
self._keyObject,
(rsa.RSAPublicKey, dsa.DSAPublicKey, ec.EllipticCurvePublicKey,
ed25519.Ed25519PublicKey))
def public(self):
"""
Returns a version of this key containing only the public key data.
If this is a public key, this may or may not be the same object
as self.
@rtype: L{Key}
@return: A public key.
"""
if self.isPublic():
return self
else:
return Key(self._keyObject.public_key())
def fingerprint(self, format=FingerprintFormats.MD5_HEX):
"""
The fingerprint of a public key consists of the output of the
message-digest algorithm in the specified format.
Supported formats include L{FingerprintFormats.MD5_HEX} and
L{FingerprintFormats.SHA256_BASE64}
The input to the algorithm is the public key data as specified by [RFC4253].
The output of sha256[RFC4634] algorithm is presented to the
user in the form of base64 encoded sha256 hashes.
Example: C{US5jTUa0kgX5ZxdqaGF0yGRu8EgKXHNmoT8jHKo1StM=}
The output of the MD5[RFC1321](default) algorithm is presented to the user as
a sequence of 16 octets printed as hexadecimal with lowercase letters
and separated by colons.
Example: C{c1:b1:30:29:d7:b8:de:6c:97:77:10:d7:46:41:63:87}
@param format: Format for fingerprint generation. Consists
hash function and representation format.
Default is L{FingerprintFormats.MD5_HEX}
@since: 8.2
@return: the user presentation of this L{Key}'s fingerprint, as a
string.
@rtype: L{str}
"""
if format is FingerprintFormats.SHA256_BASE64:
return nativeString(base64.b64encode(
sha256(self.blob()).digest()))
elif format is FingerprintFormats.MD5_HEX:
return nativeString(
b':'.join([binascii.hexlify(x)
for x in iterbytes(md5(self.blob()).digest())]))
else:
raise BadFingerPrintFormat(
'Unsupported fingerprint format: %s' % (format,))
def type(self):
"""
Return the type of the object we wrap. Currently this can only be
'RSA', 'DSA', 'EC', or 'Ed25519'.
@rtype: L{str}
@raises RuntimeError: If the object type is unknown.
"""
if isinstance(
self._keyObject, (rsa.RSAPublicKey, rsa.RSAPrivateKey)):
return 'RSA'
elif isinstance(
self._keyObject, (dsa.DSAPublicKey, dsa.DSAPrivateKey)):
return 'DSA'
elif isinstance(
self._keyObject,
(ec.EllipticCurvePublicKey, ec.EllipticCurvePrivateKey)):
return 'EC'
elif isinstance(
self._keyObject,
(ed25519.Ed25519PublicKey, ed25519.Ed25519PrivateKey)):
return 'Ed25519'
else:
raise RuntimeError(
'unknown type of object: %r' % (self._keyObject,))
def sshType(self):
"""
Get the type of the object we wrap as defined in the SSH protocol,
defined in RFC 4253, Section 6.6. Currently this can only be b'ssh-rsa',
b'ssh-dss', b'ssh-ed25519' or b'ecdsa-sha2-[identifier]'.
identifier is the standard NIST curve name
@return: The key type format.
@rtype: L{bytes}
"""
if self.type() == 'EC':
return (
b'ecdsa-sha2-' +
_secToNist[self._keyObject.curve.name.encode('ascii')])
else:
return {
'RSA': b'ssh-rsa',
'DSA': b'ssh-dss',
'Ed25519': b'ssh-ed25519',
}[self.type()]
def size(self):
"""
Return the size of the object we wrap.
@return: The size of the key.
@rtype: L{int}
"""
if self._keyObject is None:
return 0
elif self.type() == 'EC':
return self._keyObject.curve.key_size
elif self.type() == 'Ed25519':
return 256
return self._keyObject.key_size
def data(self):
"""
Return the values of the public key as a dictionary.
@rtype: L{dict}
"""
if isinstance(self._keyObject, rsa.RSAPublicKey):
numbers = self._keyObject.public_numbers()
return {
"n": numbers.n,
"e": numbers.e,
}
elif isinstance(self._keyObject, rsa.RSAPrivateKey):
numbers = self._keyObject.private_numbers()
return {
"n": numbers.public_numbers.n,
"e": numbers.public_numbers.e,
"d": numbers.d,
"p": numbers.p,
"q": numbers.q,
# Use a trick: iqmp is q^-1 % p, u is p^-1 % q
"u": rsa.rsa_crt_iqmp(numbers.q, numbers.p),
}
elif isinstance(self._keyObject, dsa.DSAPublicKey):
numbers = self._keyObject.public_numbers()
return {
"y": numbers.y,
"g": numbers.parameter_numbers.g,
"p": numbers.parameter_numbers.p,
"q": numbers.parameter_numbers.q,
}
elif isinstance(self._keyObject, dsa.DSAPrivateKey):
numbers = self._keyObject.private_numbers()
return {
"x": numbers.x,
"y": numbers.public_numbers.y,
"g": numbers.public_numbers.parameter_numbers.g,
"p": numbers.public_numbers.parameter_numbers.p,
"q": numbers.public_numbers.parameter_numbers.q,
}
elif isinstance(self._keyObject, ec.EllipticCurvePublicKey):
numbers = self._keyObject.public_numbers()
return {
"x": numbers.x,
"y": numbers.y,
"curve": self.sshType(),
}
elif isinstance(self._keyObject, ec.EllipticCurvePrivateKey):
numbers = self._keyObject.private_numbers()
return {
"x": numbers.public_numbers.x,
"y": numbers.public_numbers.y,
"privateValue": numbers.private_value,
"curve": self.sshType(),
}
elif isinstance(self._keyObject, ed25519.Ed25519PublicKey):
return {
"a": self._keyObject.public_bytes(
serialization.Encoding.Raw,
serialization.PublicFormat.Raw
),
}
elif isinstance(self._keyObject, ed25519.Ed25519PrivateKey):
return {
"a": self._keyObject.public_key().public_bytes(
serialization.Encoding.Raw,
serialization.PublicFormat.Raw
),
"k": self._keyObject.private_bytes(
serialization.Encoding.Raw,
serialization.PrivateFormat.Raw,
serialization.NoEncryption()
),
}
else:
raise RuntimeError("Unexpected key type: %s" % (self._keyObject,))
def blob(self):
"""
Return the public key blob for this key. The blob is the
over-the-wire format for public keys.
SECSH-TRANS RFC 4253 Section 6.6.
RSA keys::
string 'ssh-rsa'
integer e
integer n
DSA keys::
string 'ssh-dss'
integer p
integer q
integer g
integer y
EC keys::
string 'ecdsa-sha2-[identifier]'
integer x
integer y
identifier is the standard NIST curve name
Ed25519 keys::
string 'ssh-ed25519'
string a
@rtype: L{bytes}
"""
type = self.type()
data = self.data()
if type == 'RSA':
return (common.NS(b'ssh-rsa') + common.MP(data['e']) +
common.MP(data['n']))
elif type == 'DSA':
return (common.NS(b'ssh-dss') + common.MP(data['p']) +
common.MP(data['q']) + common.MP(data['g']) +
common.MP(data['y']))
elif type == 'EC':
byteLength = (self._keyObject.curve.key_size + 7) // 8
return (
common.NS(data['curve']) + common.NS(data["curve"][-8:]) +
common.NS(
b'\x04' + utils.int_to_bytes(data['x'], byteLength) +
utils.int_to_bytes(data['y'], byteLength)))
elif type == 'Ed25519':
return common.NS(b'ssh-ed25519') + common.NS(data['a'])
else:
raise BadKeyError('unknown key type: %s' % (type,))
def privateBlob(self):
"""
Return the private key blob for this key. The blob is the
over-the-wire format for private keys:
Specification in OpenSSH PROTOCOL.agent
RSA keys::
string 'ssh-rsa'
integer n
integer e
integer d
integer u
integer p
integer q
DSA keys::
string 'ssh-dss'
integer p
integer q
integer g
integer y
integer x
EC keys::
string 'ecdsa-sha2-[identifier]'
integer x
integer y
integer privateValue
identifier is the NIST standard curve name.
Ed25519 keys:
string 'ssh-ed25519'
string a
string k || a
"""
type = self.type()
data = self.data()
if type == 'RSA':
iqmp = rsa.rsa_crt_iqmp(data['p'], data['q'])
return (common.NS(b'ssh-rsa') + common.MP(data['n']) +
common.MP(data['e']) + common.MP(data['d']) +
common.MP(iqmp) + common.MP(data['p']) +
common.MP(data['q']))
elif type == 'DSA':
return (common.NS(b'ssh-dss') + common.MP(data['p']) +
common.MP(data['q']) + common.MP(data['g']) +
common.MP(data['y']) + common.MP(data['x']))
elif type == 'EC':
encPub = self._keyObject.public_key().public_bytes(
serialization.Encoding.X962,
serialization.PublicFormat.UncompressedPoint
)
return (common.NS(data['curve']) + common.NS(data['curve'][-8:]) +
common.NS(encPub) + common.MP(data['privateValue']))
elif type == 'Ed25519':
return (common.NS(b'ssh-ed25519') + common.NS(data['a']) +
common.NS(data['k'] + data['a']))
else:
raise BadKeyError('unknown key type: %s' % (type,))
@_mutuallyExclusiveArguments([
['extra', 'comment'],
['extra', 'passphrase'],
])
def toString(self, type, extra=None, subtype=None, comment=None,
passphrase=None):
"""
Create a string representation of this key. If the key is a private
key and you want the representation of its public key, use
C{key.public().toString()}. type maps to a _toString_* method.
@param type: The type of string to emit. Currently supported values
are C{'OPENSSH'}, C{'LSH'}, and C{'AGENTV3'}.
@type type: L{str}
@param extra: Any extra data supported by the selected format which
is not part of the key itself. For public OpenSSH keys, this is
a comment. For private OpenSSH keys, this is a passphrase to
encrypt with. (Deprecated since Twisted 20.3.0; use C{comment}
or C{passphrase} as appropriate instead.)
@type extra: L{bytes} or L{unicode} or L{None}
@param subtype: A subtype of the requested C{type} to emit. Only
supported for private OpenSSH keys, for which the currently
supported subtypes are C{'PEM'} and C{'v1'}. If not given, an
appropriate default is used.
@type subtype: L{str} or L{None}
@param comment: A comment to include with the key. Only supported
for OpenSSH keys.
Present since Twisted 20.3.0.
@type comment: L{bytes} or L{unicode} or L{None}
@param passphrase: A passphrase to encrypt the key with. Only
supported for private OpenSSH keys.
Present since Twisted 20.3.0.
@type passphrase: L{bytes} or L{unicode} or L{None}
@rtype: L{bytes}
"""
if extra is not None:
# Compatibility with old parameter format.
warnings.warn(
"The 'extra' argument to "
"twisted.conch.ssh.keys.Key.toString was deprecated in "
"Twisted 20.3.0; use 'comment' or 'passphrase' instead.",
DeprecationWarning, stacklevel=3)
if self.isPublic():
comment = extra
else:
passphrase = extra
if isinstance(comment, unicode):
comment = comment.encode("utf-8")
if isinstance(passphrase, unicode):
passphrase = passphrase.encode("utf-8")
method = getattr(self, '_toString_%s' % (type.upper(),), None)
if method is None:
raise BadKeyError('unknown key type: %s' % (type,))
return method(subtype=subtype, comment=comment, passphrase=passphrase)
def _toPublicOpenSSH(self, comment=None):
"""
Return a public OpenSSH key string.
See _fromString_PUBLIC_OPENSSH for the string format.
@type comment: L{bytes} or L{None}
@param comment: A comment to include with the key, or L{None} to
omit the comment.
"""
if self.type() == 'EC':
if not comment:
comment = b''
return (self._keyObject.public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.OpenSSH
) + b' ' + comment).strip()
b64Data = encodebytes(self.blob()).replace(b'\n', b'')
if not comment:
comment = b''
return (self.sshType() + b' ' + b64Data + b' ' + comment).strip()
def _toPrivateOpenSSH_v1(self, comment=None, passphrase=None):
"""
Return a private OpenSSH key string, in the "openssh-key-v1" format
introduced in OpenSSH 6.5.
See _fromPrivateOpenSSH_v1 for the string format.
@type passphrase: L{bytes} or L{None}
@param passphrase: The passphrase to encrypt the key with, or L{None}
if it is not encrypted.
"""
if passphrase:
# For now we just hardcode the cipher to the one used by
# OpenSSH. We could make this configurable later if it's
# needed.
cipher = algorithms.AES
cipherName = b'aes256-ctr'
kdfName = b'bcrypt'
blockSize = cipher.block_size // 8
keySize = 32
ivSize = blockSize
salt = randbytes.secureRandom(ivSize)
rounds = 100
kdfOptions = common.NS(salt) + struct.pack('!L', rounds)
else:
cipherName = b'none'
kdfName = b'none'
blockSize = 8
kdfOptions = b''
check = randbytes.secureRandom(4)
privKeyList = (
check + check + self.privateBlob() + common.NS(comment or b''))
padByte = 0
while len(privKeyList) % blockSize:
padByte += 1
privKeyList += chr(padByte & 0xFF)
if passphrase:
            encKey = bcrypt.kdf(passphrase, salt, keySize + ivSize, rounds)
encryptor = Cipher(
cipher(encKey[:keySize]),
modes.CTR(encKey[keySize:keySize + ivSize]),
backend=default_backend()
).encryptor()
encPrivKeyList = (
encryptor.update(privKeyList) + encryptor.finalize())
else:
encPrivKeyList = privKeyList
blob = (
b'openssh-key-v1\0' +
common.NS(cipherName) +
common.NS(kdfName) + common.NS(kdfOptions) +
struct.pack('!L', 1) +
common.NS(self.blob()) +
common.NS(encPrivKeyList))
b64Data = encodebytes(blob).replace(b'\n', b'')
lines = (
[b'-----BEGIN OPENSSH PRIVATE KEY-----'] +
[b64Data[i:i + 64] for i in range(0, len(b64Data), 64)] +
[b'-----END OPENSSH PRIVATE KEY-----'])
return b'\n'.join(lines) + b'\n'
def _toPrivateOpenSSH_PEM(self, passphrase=None):
"""
Return a private OpenSSH key string, in the old PEM-based format.
See _fromPrivateOpenSSH_PEM for the string format.
@type passphrase: L{bytes} or L{None}
@param passphrase: The passphrase to encrypt the key with, or L{None}
if it is not encrypted.
"""
if self.type() == 'EC':
            # EC keys have a complex ASN.1 structure, hence we handle them this way.
if not passphrase:
# unencrypted private key
encryptor = serialization.NoEncryption()
else:
encryptor = serialization.BestAvailableEncryption(passphrase)
return self._keyObject.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
encryptor)
elif self.type() == 'Ed25519':
raise ValueError(
'cannot serialize Ed25519 key to OpenSSH PEM format; use v1 '
'instead'
)
data = self.data()
lines = [b''.join((b'-----BEGIN ', self.type().encode('ascii'),
b' PRIVATE KEY-----'))]
if self.type() == 'RSA':
p, q = data['p'], data['q']
iqmp = rsa.rsa_crt_iqmp(p, q)
objData = (0, data['n'], data['e'], data['d'], p, q,
data['d'] % (p - 1), data['d'] % (q - 1),
iqmp)
else:
objData = (0, data['p'], data['q'], data['g'], data['y'],
data['x'])
asn1Sequence = univ.Sequence()
for index, value in izip(itertools.count(), objData):
asn1Sequence.setComponentByPosition(index, univ.Integer(value))
asn1Data = berEncoder.encode(asn1Sequence)
if passphrase:
iv = randbytes.secureRandom(8)
hexiv = ''.join(['%02X' % (ord(x),) for x in iterbytes(iv)])
hexiv = hexiv.encode('ascii')
lines.append(b'Proc-Type: 4,ENCRYPTED')
lines.append(b'DEK-Info: DES-EDE3-CBC,' + hexiv + b'\n')
ba = md5(passphrase + iv).digest()
bb = md5(ba + passphrase + iv).digest()
encKey = (ba + bb)[:24]
padLen = 8 - (len(asn1Data) % 8)
asn1Data += chr(padLen) * padLen
encryptor = Cipher(
algorithms.TripleDES(encKey),
modes.CBC(iv),
backend=default_backend()
).encryptor()
asn1Data = encryptor.update(asn1Data) + encryptor.finalize()
b64Data = encodebytes(asn1Data).replace(b'\n', b'')
lines += [b64Data[i:i + 64] for i in range(0, len(b64Data), 64)]
lines.append(b''.join((b'-----END ', self.type().encode('ascii'),
b' PRIVATE KEY-----')))
return b'\n'.join(lines)
def _toString_OPENSSH(self, subtype=None, comment=None, passphrase=None):
"""
Return a public or private OpenSSH string. See
_fromString_PUBLIC_OPENSSH and _fromPrivateOpenSSH_PEM for the
        string formats.
        @param subtype: A subtype of the OpenSSH format to emit ('PEM' or
            'v1' for private keys), or L{None} for an appropriate default.
        @type subtype: L{str} or L{None}
        @param comment: A comment to include with a public key.
        @type comment: L{bytes} or L{None}
        @param passphrase: A passphrase to encrypt a private key with.
        @type passphrase: L{bytes} or L{None}
@rtype: L{bytes}
"""
if self.isPublic():
return self._toPublicOpenSSH(comment=comment)
# No pre-v1 format is defined for Ed25519 keys.
elif subtype == 'v1' or (subtype is None and self.type() == 'Ed25519'):
return self._toPrivateOpenSSH_v1(
comment=comment, passphrase=passphrase)
elif subtype is None or subtype == 'PEM':
return self._toPrivateOpenSSH_PEM(passphrase=passphrase)
else:
raise ValueError('unknown subtype %s' % (subtype,))
def _toString_LSH(self, **kwargs):
"""
Return a public or private LSH key. See _fromString_PUBLIC_LSH and
_fromString_PRIVATE_LSH for the key formats.
@rtype: L{bytes}
"""
data = self.data()
type = self.type()
if self.isPublic():
if type == 'RSA':
keyData = sexpy.pack([[b'public-key',
[b'rsa-pkcs1-sha1',
[b'n', common.MP(data['n'])[4:]],
[b'e', common.MP(data['e'])[4:]]]]])
elif type == 'DSA':
keyData = sexpy.pack([[b'public-key',
[b'dsa',
[b'p', common.MP(data['p'])[4:]],
[b'q', common.MP(data['q'])[4:]],
[b'g', common.MP(data['g'])[4:]],
[b'y', common.MP(data['y'])[4:]]]]])
else:
raise BadKeyError("unknown key type %s" % (type,))
return (b'{' + encodebytes(keyData).replace(b'\n', b'') +
b'}')
else:
if type == 'RSA':
p, q = data['p'], data['q']
iqmp = rsa.rsa_crt_iqmp(p, q)
return sexpy.pack([[b'private-key',
[b'rsa-pkcs1',
[b'n', common.MP(data['n'])[4:]],
[b'e', common.MP(data['e'])[4:]],
[b'd', common.MP(data['d'])[4:]],
[b'p', common.MP(q)[4:]],
[b'q', common.MP(p)[4:]],
[b'a', common.MP(
data['d'] % (q - 1))[4:]],
[b'b', common.MP(
data['d'] % (p - 1))[4:]],
[b'c', common.MP(iqmp)[4:]]]]])
elif type == 'DSA':
return sexpy.pack([[b'private-key',
[b'dsa',
[b'p', common.MP(data['p'])[4:]],
[b'q', common.MP(data['q'])[4:]],
[b'g', common.MP(data['g'])[4:]],
[b'y', common.MP(data['y'])[4:]],
[b'x', common.MP(data['x'])[4:]]]]])
else:
raise BadKeyError("unknown key type %s'" % (type,))
def _toString_AGENTV3(self, **kwargs):
"""
Return a private Secure Shell Agent v3 key. See
_fromString_AGENTV3 for the key format.
@rtype: L{bytes}
"""
data = self.data()
if not self.isPublic():
if self.type() == 'RSA':
values = (data['e'], data['d'], data['n'], data['u'],
data['p'], data['q'])
elif self.type() == 'DSA':
values = (data['p'], data['q'], data['g'], data['y'],
data['x'])
return common.NS(self.sshType()) + b''.join(map(common.MP, values))
def sign(self, data):
"""
Sign some data with this key.
SECSH-TRANS RFC 4253 Section 6.6.
@type data: L{bytes}
@param data: The data to sign.
@rtype: L{bytes}
@return: A signature for the given data.
"""
keyType = self.type()
if keyType == 'RSA':
sig = self._keyObject.sign(data, padding.PKCS1v15(), hashes.SHA1())
ret = common.NS(sig)
elif keyType == 'DSA':
sig = self._keyObject.sign(data, hashes.SHA1())
(r, s) = decode_dss_signature(sig)
# SSH insists that the DSS signature blob be two 160-bit integers
# concatenated together. The sig[0], [1] numbers from obj.sign
# are just numbers, and could be any length from 0 to 160 bits.
# Make sure they are padded out to 160 bits (20 bytes each)
ret = common.NS(int_to_bytes(r, 20) + int_to_bytes(s, 20))
elif keyType == 'EC': # Pragma: no branch
# Hash size depends on key size
keySize = self.size()
if keySize <= 256:
hashSize = hashes.SHA256()
elif keySize <= 384:
hashSize = hashes.SHA384()
else:
hashSize = hashes.SHA512()
signature = self._keyObject.sign(data, ec.ECDSA(hashSize))
(r, s) = decode_dss_signature(signature)
rb = int_to_bytes(r)
sb = int_to_bytes(s)
            # int_to_bytes returns rb[0] as a str in Python 2
            # and as an int in Python 3
if type(rb[0]) is str:
rcomp = ord(rb[0])
else:
rcomp = rb[0]
# If the MSB is set, prepend a null byte for correct formatting.
if rcomp & 0x80:
rb = b"\x00" + rb
if type(sb[0]) is str:
scomp = ord(sb[0])
else:
scomp = sb[0]
if scomp & 0x80:
sb = b"\x00" + sb
ret = common.NS(common.NS(rb) + common.NS(sb))
elif keyType == 'Ed25519':
ret = common.NS(self._keyObject.sign(data))
return common.NS(self.sshType()) + ret
def verify(self, signature, data):
"""
Verify a signature using this key.
@type signature: L{bytes}
@param signature: The signature to verify.
@type data: L{bytes}
@param data: The signed data.
@rtype: L{bool}
@return: C{True} if the signature is valid.
"""
if len(signature) == 40:
# DSA key with no padding
signatureType, signature = b'ssh-dss', common.NS(signature)
else:
signatureType, signature = common.getNS(signature)
if signatureType != self.sshType():
return False
keyType = self.type()
if keyType == 'RSA':
k = self._keyObject
if not self.isPublic():
k = k.public_key()
args = (
common.getNS(signature)[0],
data,
padding.PKCS1v15(),
hashes.SHA1(),
)
elif keyType == 'DSA':
concatenatedSignature = common.getNS(signature)[0]
r = int_from_bytes(concatenatedSignature[:20], 'big')
s = int_from_bytes(concatenatedSignature[20:], 'big')
signature = encode_dss_signature(r, s)
k = self._keyObject
if not self.isPublic():
k = k.public_key()
args = (signature, data, hashes.SHA1())
elif keyType == 'EC': # Pragma: no branch
concatenatedSignature = common.getNS(signature)[0]
rstr, sstr, rest = common.getNS(concatenatedSignature, 2)
r = int_from_bytes(rstr, 'big')
s = int_from_bytes(sstr, 'big')
signature = encode_dss_signature(r, s)
k = self._keyObject
if not self.isPublic():
k = k.public_key()
keySize = self.size()
if keySize <= 256: # Hash size depends on key size
hashSize = hashes.SHA256()
elif keySize <= 384:
hashSize = hashes.SHA384()
else:
hashSize = hashes.SHA512()
args = (signature, data, ec.ECDSA(hashSize))
elif keyType == 'Ed25519':
k = self._keyObject
if not self.isPublic():
k = k.public_key()
args = (common.getNS(signature)[0], data)
try:
k.verify(*args)
except InvalidSignature:
return False
else:
return True
def _getPersistentRSAKey(location, keySize=4096):
"""
This function returns a persistent L{Key}.
The key is loaded from a PEM file in C{location}. If it does not exist, a
key with the key size of C{keySize} is generated and saved.
@param location: Where the key is stored.
@type location: L{twisted.python.filepath.FilePath}
@param keySize: The size of the key, if it needs to be generated.
@type keySize: L{int}
@returns: A persistent key.
@rtype: L{Key}
"""
location.parent().makedirs(ignoreExistingDirectory=True)
# If it doesn't exist, we want to generate a new key and save it
if not location.exists():
privateKey = rsa.generate_private_key(
public_exponent=65537,
key_size=keySize,
backend=default_backend()
)
pem = privateKey.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
location.setContent(pem)
# By this point (save any hilarious race conditions) we should have a
# working PEM file. Load it!
# (Future archaeological readers: I chose not to short circuit above,
# because then there's two exit paths to this code!)
with location.open("rb") as keyFile:
privateKey = serialization.load_pem_private_key(
keyFile.read(),
password=None,
backend=default_backend()
)
return Key(privateKey)
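# Example usage of _getPersistentRSAKey (a sketch; the key location below is a
# hypothetical path, not one required by this module):
#
#     from twisted.python.filepath import FilePath
#     hostKey = _getPersistentRSAKey(FilePath("/etc/myapp/id_rsa"), keySize=2048)
#     print(hostKey.fingerprint())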
|
the-stack_106_25031 | """
Performs a few basic commands in creating groups and moving targets around.
No errors or exceptions should be thrown.
"""
import seash
import sys
orig_stdout = sys.stdout
# To temporarily prevent printing to the console, stdout is redirected to
# test_results.txt, which is used as a text dump
sys.stdout = open("test_results.txt", "w")
command_list = [
'loadkeys guest0',
'as guest0',
'browse',
'on %1',
'savestate testing_state'
]
seash.command_loop(command_list)
sys.stdout.close()
# No output or errors should be thrown in the following series of commands
sys.stdout = orig_stdout
command_list = [
'loadkeys guest0',
'as guest0',
'loadstate testing_state',
'add to test_group',
'remove %1 from test_group',
'add %1 to test_group_2',
'on %2',
'move %2 to test_group'
]
seash.command_loop(command_list)
|
the-stack_106_25032 | # coding: utf-8
"""
Experimental Looker API 3.1 Preview
This API 3.1 is in active development. Breaking changes are likely to occur to some API functions in future Looker releases until API 3.1 is officially launched and upgraded to beta status. If you have time and interest to experiment with new or modified services exposed in this embryonic API 3.1, we welcome your participation and feedback! For large development efforts or critical line-of-business projects, we strongly recommend you stick with the API 3.0 while API 3.1 is under construction. # noqa: E501
OpenAPI spec version: 3.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class LookBasic(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'title': 'str',
'content_metadata_id': 'int',
'can': 'dict(str, bool)'
}
attribute_map = {
'id': 'id',
'title': 'title',
'content_metadata_id': 'content_metadata_id',
'can': 'can'
}
def __init__(self, id=None, title=None, content_metadata_id=None, can=None): # noqa: E501
"""LookBasic - a model defined in Swagger""" # noqa: E501
self._id = None
self._title = None
self._content_metadata_id = None
self._can = None
self.discriminator = None
if id is not None:
self.id = id
if title is not None:
self.title = title
if content_metadata_id is not None:
self.content_metadata_id = content_metadata_id
if can is not None:
self.can = can
@property
def id(self):
"""Gets the id of this LookBasic. # noqa: E501
Unique Id # noqa: E501
:return: The id of this LookBasic. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this LookBasic.
Unique Id # noqa: E501
:param id: The id of this LookBasic. # noqa: E501
:type: int
"""
self._id = id
@property
def title(self):
"""Gets the title of this LookBasic. # noqa: E501
Look Title # noqa: E501
:return: The title of this LookBasic. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this LookBasic.
Look Title # noqa: E501
:param title: The title of this LookBasic. # noqa: E501
:type: str
"""
self._title = title
@property
def content_metadata_id(self):
"""Gets the content_metadata_id of this LookBasic. # noqa: E501
Id of content metadata # noqa: E501
:return: The content_metadata_id of this LookBasic. # noqa: E501
:rtype: int
"""
return self._content_metadata_id
@content_metadata_id.setter
def content_metadata_id(self, content_metadata_id):
"""Sets the content_metadata_id of this LookBasic.
Id of content metadata # noqa: E501
:param content_metadata_id: The content_metadata_id of this LookBasic. # noqa: E501
:type: int
"""
self._content_metadata_id = content_metadata_id
@property
def can(self):
"""Gets the can of this LookBasic. # noqa: E501
Operations the current user is able to perform on this object # noqa: E501
:return: The can of this LookBasic. # noqa: E501
:rtype: dict(str, bool)
"""
return self._can
@can.setter
def can(self, can):
"""Sets the can of this LookBasic.
Operations the current user is able to perform on this object # noqa: E501
:param can: The can of this LookBasic. # noqa: E501
:type: dict(str, bool)
"""
self._can = can
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LookBasic):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_106_25033 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This script can be used to extract the VOC2007 and VOC2012 dataset files
[data, labels] from the given annotations that can be used for training. The
files can be prepared for various data splits
"""
import argparse
import logging
import os
import sys
from glob import glob
import numpy as np
from fvcore.common.file_io import PathManager
# initiate the logger
FORMAT = "[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
logger = logging.getLogger(__name__)
def validate_files(input_files):
"""
The valid files will have name: <class_name>_<split>.txt. We want to remove
all the other files from the input.
"""
output_files = []
for item in input_files:
if len(item.split("/")[-1].split("_")) == 2:
output_files.append(item)
return output_files
def get_data_files(split, args):
data_dir = f"{args.data_source_dir}/ImageSets/Main"
assert PathManager.exists(data_dir), "Data: {} doesn't exist".format(data_dir)
test_data_files = glob(os.path.join(data_dir, "*_test.txt"))
test_data_files = validate_files(test_data_files)
if args.separate_partitions > 0:
train_data_files = glob(os.path.join(data_dir, "*_train.txt"))
val_data_files = glob(os.path.join(data_dir, "*_val.txt"))
train_data_files = validate_files(train_data_files)
val_data_files = validate_files(val_data_files)
assert len(train_data_files) == len(val_data_files)
if split == "train":
data_files = train_data_files
elif split == "test":
data_files = test_data_files
else:
data_files = val_data_files
else:
train_data_files = glob(os.path.join(data_dir, "*_trainval.txt"))
if len(test_data_files) == 0:
# For VOC2012 dataset, we have trainval, val and train data.
train_data_files = glob(os.path.join(data_dir, "*_train.txt"))
test_data_files = glob(os.path.join(data_dir, "*_val.txt"))
test_data_files = validate_files(test_data_files)
train_data_files = validate_files(train_data_files)
data_files = train_data_files if (split == "train") else test_data_files
assert len(train_data_files) == len(test_data_files), "Missing classes"
return data_files
def get_images_labels_info(split, args):
assert PathManager.exists(args.data_source_dir), "Data source NOT found. Abort"
data_files = get_data_files(split, args)
# we will construct a map for image name to the vector of -1, 0, 1
# we sort the data_files which gives sorted class names as well
img_labels_map = {}
for cls_num, data_path in enumerate(sorted(data_files)):
# for this class, we have images and each image will have label
# 1, -1, 0 -> present, not present, ignore respectively as in VOC data.
with PathManager.open(data_path, "r") as fopen:
for line in fopen:
try:
img_name, orig_label = line.strip().split()
if img_name not in img_labels_map:
img_labels_map[img_name] = -(
np.ones(len(data_files), dtype=np.int32)
)
orig_label = int(orig_label)
# in VOC data, -1 (not present), set it to 0 as train target
if orig_label == -1:
orig_label = 0
# in VOC data, 0 (ignore), set it to -1 as train target
elif orig_label == 0:
orig_label = -1
img_labels_map[img_name][cls_num] = orig_label
except Exception:
logger.info(
"Error processing: {} data_path: {}".format(line, data_path)
)
img_paths, img_labels = [], []
for item in sorted(img_labels_map.keys()):
img_paths.append(f"{args.data_source_dir}/JPEGImages/{item}.jpg")
img_labels.append(img_labels_map[item])
output_dict = {}
if args.generate_json:
cls_names = []
for item in sorted(data_files):
name = item.split("/")[-1].split(".")[0].split("_")[0]
cls_names.append(name)
img_ids, json_img_labels = [], []
for item in sorted(img_labels_map.keys()):
img_ids.append(item)
json_img_labels.append(img_labels_map[item])
for img_idx in range(len(img_ids)):
img_id = img_ids[img_idx]
out_lbl = {}
for cls_idx in range(len(cls_names)):
name = cls_names[cls_idx]
out_lbl[name] = int(json_img_labels[img_idx][cls_idx])
output_dict[img_id] = out_lbl
return img_paths, img_labels, output_dict
def main():
parser = argparse.ArgumentParser(description="Create VOC data files")
parser.add_argument(
"--data_source_dir",
type=str,
default=None,
help="Path to data directory containing ImageSets and JPEGImages",
)
parser.add_argument(
"--output_dir",
type=str,
default=None,
help="Output directory where images/label information will be written",
)
parser.add_argument(
"--separate_partitions",
type=int,
default=0,
help="Whether to create files separately for partitions train/test/val",
)
parser.add_argument(
"--generate_json",
type=int,
default=0,
help="Whether to json files for partitions train/test/val",
)
args = parser.parse_args()
# given the data directory for the partitions train, val, and test, we will
# write numpy files for each partition.
partitions = ["train", "test"]
if args.separate_partitions > 0:
partitions.append("val")
for partition in partitions:
logger.info("========Preparing {} data files========".format(partition))
imgs_info, lbls_info, output_dict = get_images_labels_info(partition, args)
img_info_out_path = f"{args.output_dir}/{partition}_images.npy"
label_info_out_path = f"{args.output_dir}/{partition}_labels.npy"
logger.info("=================SAVING DATA files=======================")
logger.info(f"partition: {partition} saving img_paths to: {img_info_out_path}")
logger.info(f"partition: {partition} saving lbls_paths: {label_info_out_path}")
logger.info(f"partition: {partition} imgs: {np.array(imgs_info).shape}")
np.save(img_info_out_path, np.array(imgs_info))
np.save(label_info_out_path, np.array(lbls_info))
if args.generate_json:
json_out_path = f"{args.output_dir}/{partition}_targets.json"
import json
with PathManager.open(json_out_path, "w") as fp:
json.dump(output_dict, fp)
logger.info("Saved Json to: {}".format(json_out_path))
logger.info("DONE!")
if __name__ == "__main__":
main()
|
the-stack_106_25034 | import cv2
img='/Users/xh/Downloads/ks/qa-irs/app/task/frames/60/irs-1cd298e4ed39042588cb5224662f79b2.mp4.mp4/00001.jpg'
# Convert the image to a single-channel grayscale image
img=cv2.imread(img)
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Get the number of rows and columns of the grayscale matrix
r, c = gray_img.shape[:2]
pixels_sum = r * c  # total number of pixels in the grayscale image is r*c
# Get the dark pixels (grayscale values 0~19 are treated as dark); this threshold can be adjusted
dark_points = (gray_img < 20)
target_array = gray_img[dark_points]
dark_sum = target_array.size
# Compute the proportion of dark pixels in the whole image
dark_prop = dark_sum / pixels_sum
if dark_prop >= 0.85:
print("black")
else:
print("no black")
|
the-stack_106_25037 | from pymongo import MongoClient
mongo_uri = "mongodb://admin:[email protected]:21182/c4e"
client = MongoClient(mongo_uri)
db = client.get_default_database()
blog = db["posts"]
post = {
"title" : "Ai đưa em về",
"author" : "Duy Anh",
"content" : """
Đêm nay ai đưa em về
Đường khuya sao trời lấp lánh
Đêm nay ai đưa em về
Mắt em sao chiếu long lanh
"""
}
blog.insert_one(post)
|
the-stack_106_25039 | from __future__ import print_function
"""
panotti_models.py
Author: Scott Hawley
Where we'll put various NN models.
MyCNN: This is kind of a mixture of Keun Woo Choi's code https://github.com/keunwoochoi/music-auto_tagging-keras
and the MNIST classifier at https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py
"""
import keras
import tensorflow as tf
from keras.models import Sequential, Model, load_model, save_model
from keras.layers import (
Input,
Dense,
TimeDistributed,
LSTM,
Dropout,
Activation,
Convolution2D,
MaxPooling2D,
Flatten,
Conv2D,
)
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import ELU
from keras.optimizers import SGD, Adam
from os.path import isfile
from panotti.multi_gpu import *
from tensorflow.python.client import device_lib
from panotti.multi_gpu import make_parallel, get_available_gpus
import h5py
import numpy as np
# This is a VGG-style network that I made by 'dumbing down' @keunwoochoi's compact_cnn code
# I have not attempted much optimization, however it *is* fairly understandable
def MyCNN_Keras2(X_shape, nb_classes, nb_layers=4):
# Inputs:
# X_shape = [ # spectrograms per batch, # audio channels, # spectrogram freq bins, # spectrogram time bins ]
# nb_classes = number of output n_classes
# nb_layers = number of conv-pooling sets in the CNN
from keras import backend as K
K.set_image_data_format(
"channels_last"
) # SHH changed on 3/1/2018 b/c tensorflow prefers channels_last
nb_filters = 32 # number of convolutional filters = "feature maps"
kernel_size = (3, 3) # convolution kernel size
pool_size = (2, 2) # size of pooling area for max pooling
cl_dropout = 0.5 # conv. layer dropout
dl_dropout = 0.6 # dense layer dropout
print(" MyCNN_Keras2: X_shape = ", X_shape, ", channels = ", X_shape[3])
input_shape = (X_shape[1], X_shape[2], X_shape[3])
model = Sequential()
model.add(
Conv2D(
nb_filters,
kernel_size,
padding="valid",
input_shape=input_shape,
name="Input",
)
)
model.add(BatchNormalization(axis=1))
model.add(
Activation("relu")
) # Leave this relu & BN here. ELU is not good here (my experience)
for layer in range(nb_layers - 1): # add more layers than just the first
model.add(Conv2D(nb_filters, kernel_size))
        # model.add(BatchNormalization(axis=1)) # ELU authors recommend no BatchNorm. I confirm.
model.add(Activation("elu"))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(cl_dropout))
model.add(Flatten())
model.add(Dense(128)) # 128 is 'arbitrary' for now
# model.add(Activation('relu')) # relu (no BN) works ok here, however ELU works a bit better...
model.add(Activation("elu"))
model.add(Dropout(dl_dropout))
model.add(Dense(nb_classes))
model.add(Activation("softmax", name="Output"))
return model
def old_model(
X_shape, nb_classes, nb_layers=4
): # original model used in reproducing Stein et al
from keras import backend as K
K.set_image_data_format(
"channels_first"
) # old model used channels_first, new one uses channels_last. see make_melgram utils in datautils.py
nb_filters = 32 # number of convolutional filters to use
pool_size = (2, 2) # size of pooling area for max pooling
kernel_size = (3, 3) # convolution kernel size
input_shape = (X_shape[1], X_shape[2], X_shape[3])
model = Sequential()
model.add(
Conv2D(
nb_filters, kernel_size, padding="valid", input_shape=input_shape
)
)
model.add(BatchNormalization(axis=1))
model.add(Activation("relu"))
for layer in range(nb_layers - 1):
model.add(Convolution2D(nb_filters, kernel_size))
# model.add(BatchNormalization(axis=1))
# model.add(ELU(alpha=1.0))
model.add(Activation("elu"))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation("elu"))
# model.add(ELU(alpha=1.0))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation("softmax"))
return model
# Note: haven't gotten imagenet models to work...pretty much at all, they get stuck around 50% accuracy.
# Routine for other image classifier models TODO: Training gets stuck at very high losses. Not sure why
def imageModels(X, nb_classes, weights=None):
# Note these all require exactly 3 input channels.
from keras.applications import Xception, VGG16
from keras.applications.inception_v3 import InceptionV3
from keras.applications.nasnet import NASNetLarge, NASNetMobile
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.utils.generic_utils import CustomObjectScope
from keras.applications.mobilenet import MobileNet, DepthwiseConv2D
weights = (
"imagenet"
) # Could resize images to, e.g. 224 x 224 and then use weights='imagenet'.
# Need to use --mels=224 --dur=2.6s with preprocess_data.py and --tile with train_network.
input_shape = X.shape[1:]
print("input_shape = ", input_shape)
if False and (
3 != input_shape[0]
): # then we're going to add a front end that gives us 3 channels
front_end = Input(shape=input_shape)
front_end = Conv2D(
3,
(3, 3),
padding="valid",
input_shape=input_shape,
activation="relu",
)(front_end)
input_shape = (
X.shape[1],
X.shape[2],
3,
) # and now we'll set input_shape as the rest of the network wants
else:
front_end = Input(shape=input_shape)
# base_model = NASNetMobile(input_shape=input_shape, weights=weights, include_top=False, input_tensor=front_end)
with CustomObjectScope(
{
"relu6": keras.applications.mobilenet.relu6,
"DepthwiseConv2D": keras.applications.mobilenet.DepthwiseConv2D,
}
):
base_model = MobileNet(
input_shape=input_shape,
weights=weights,
include_top=False,
input_tensor=front_end,
dropout=0.6,
)
# base_model = Xception(input_shape=X[0].shape, weights=weights, include_top=False, input_tensor=front_end)
top_model = Sequential() # top_model gets tacked on to pretrained model
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(128)) # 128 is 'arbitrary' for now
top_model.add(Dense(nb_classes, name="FinalOutput")) # Final output layer
# top_model.load_weights('bootlneck_fc_model.h5')
model = Model(
inputs=base_model.input, outputs=top_model(base_model.output)
)
return model
# Used for when you want to use weights from a previously-trained model,
# with a different set/number of output classes
def attach_new_weights(
model, new_nb_classes, n_pop=2, n_p_dense=None, last_dropout=0.6
):
# "penultimate" dense layer was originally 64 or 128. can change it here
if n_p_dense is not None:
n_pop = 5
# pop off the last n_pop layers. We definitely want the last 2: Activation() and Dense(nb_classes)
for i in range(n_pop):
model.pop()
if n_p_dense is not None:
model.add(Dense(n_p_dense))
model.add(Activation("elu"))
model.add(Dropout(last_dropout))
# attach final output layers
model.add(
Dense(new_nb_classes)
) # new_nb_classes = new number of output classes
model.add(Activation("softmax"))
return model
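# Example (a sketch): re-using weights from a previously-trained model for a task
# with a different number of output classes; `new_class_names` is a hypothetical list.
#
#   model = attach_new_weights(model, new_nb_classes=len(new_class_names), n_p_dense=128)
#   model.compile(loss="categorical_crossentropy", optimizer="adadelta", metrics=["accuracy"])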
# Next two routines are for attaching class names inside the saved model .hdf5 weights file
# From https://stackoverflow.com/questions/44310448/attaching-class-labels-to-a-keras-model
def load_model_ext(filepath, custom_objects=None):
model = load_model(
filepath, custom_objects=custom_objects
) # load the model normally
# --- Now load it again and look for additional useful metadata
f = h5py.File(filepath, mode="r")
# initialize class_names with numbers (strings) in case hdf5 file doesn't have any
output_length = model.layers[-1].output_shape[1]
class_names = [str(x) for x in range(output_length)]
if "class_names" in f.attrs:
class_names = f.attrs.get("class_names").tolist()
class_names = [x.decode() for x in class_names]
f.close()
return model, class_names
def save_model_ext(model, filepath, overwrite=True, class_names=None):
save_model(model, filepath, overwrite)
if class_names is not None:
f = h5py.File(filepath, mode="a")
f.attrs["class_names"] = np.array(
class_names, dtype="S"
) # have to encode it somehow
f.close()
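# Example (a sketch) of round-tripping class names through the weights file:
#
#   save_model_ext(model, "weights.hdf5", class_names=class_names)
#   model, class_names = load_model_ext("weights.hdf5")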
# Freezing speeds up training by only declaring all but the last train_last
# layers as non-trainable; but likely results in lower accuracy
# NOTE: In practice this achieves so little that I don't even use this:
# Most of the model parameters are in the last few layers anyway
def freeze_layers(model, train_last=3):
num_layers = len(model.layers)
freeze_layers = min(
num_layers - train_last, num_layers
) # any train_last too big, freezes whole model
if train_last < 0: # special flag to disable freezing
freeze_layers = 0
print("Freezing ", freeze_layers, "/", num_layers, " layers of model")
for i in range(freeze_layers):
model.layers[i].trainable = False
return model
# This is the main routine for setting up a model
def setup_model(
X,
class_names,
nb_layers=4,
try_checkpoint=True,
weights_file="weights.hdf5",
quiet=False,
missing_weights_fatal=False,
multi_tag=False,
):
""" In the following, the reason we hang on to & return serial_model,
is because Keras can't save parallel models, but according to fchollet
the serial & parallel versions will always share the same weights
(Strange but true!)
"""
# Here's where one might 'swap out' different neural network 'model' choices
serial_model = MyCNN_Keras2(
X.shape, nb_classes=len(class_names), nb_layers=nb_layers
)
# serial_model = old_model(X.shape, nb_classes=len(class_names), nb_layers=nb_layers)
# serial_model = imageModels(X, nb_classes=len(class_names))
    # don't bother with freezing layers, at least with the hope of training on a laptop. doesn't speed up by more than a factor of 2.
# serial_model = freeze_layers(serial_model, train_last = 3)
# Initialize weights using checkpoint if it exists.
if try_checkpoint:
print("Looking for previous weights...")
if isfile(weights_file):
print("Weights file detected. Loading from ", weights_file)
loaded_model = load_model(
weights_file
) # strip any previous parallel part, to be added back in later
serial_model.set_weights(
loaded_model.get_weights()
) # assign weights based on checkpoint
else:
if missing_weights_fatal:
print("Need weights file to continue. Aborting")
assert not missing_weights_fatal
else:
print("No weights file detected, so starting from scratch.")
opt = (
"adadelta"
) # Adam(lr = 0.00001) # So far, adadelta seems to work the best of things I've tried
metrics = ["accuracy"]
if (
multi_tag
): # multi_tag means more than one class can be 'chosen' at a time; default is 'only one'
loss = "binary_crossentropy"
else:
loss = "categorical_crossentropy"
serial_model.compile(loss=loss, optimizer=opt, metrics=metrics)
# Multi-GPU "parallel" capability
gpu_count = get_available_gpus()
if gpu_count >= 2:
print(" Parallel run on", gpu_count, "GPUs")
model = make_parallel(serial_model, gpu_count=gpu_count)
model.compile(loss=loss, optimizer=opt, metrics=metrics)
else:
model = serial_model
if not quiet:
print(
"Summary of serial model (duplicated across", gpu_count, "GPUs):"
)
serial_model.summary() # print out the model layers
return (
model,
serial_model,
) # fchollet says to hang on to the serial model for checkpointing
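# Example usage (a sketch; X, Y and class_names are assumed to come from Panotti's
# preprocessed-spectrogram loading utilities, which are not shown here):
#
#   model, serial_model = setup_model(X, class_names, weights_file="weights.hdf5")
#   model.fit(X, Y, batch_size=20, epochs=50)
#   save_model_ext(serial_model, "weights.hdf5", class_names=class_names)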
|
the-stack_106_25043 | """
SMPP validators
"""
from jasmin.protocols.validation import AbstractCredentialValidator
from jasmin.protocols.smpp.error import *
from jasmin.vendor.smpp.pdu.constants import priority_flag_value_map
from jasmin.vendor.smpp.pdu.pdu_types import RegisteredDeliveryReceipt, RegisteredDelivery
class SmppsCredentialValidator(AbstractCredentialValidator):
"""Will check for user MtMessagingCredential"""
def __init__(self, action, user, submit_sm):
AbstractCredentialValidator.__init__(self, action, user)
self.submit_sm = submit_sm
def _checkSendAuthorizations(self):
"""MT Authorizations check"""
if not self.user.mt_credential.getAuthorization('smpps_send'):
raise AuthorizationError(
'Authorization failed for username [%s] (Can not send MT messages).' % self.user)
if (not self.user.mt_credential.getAuthorization('set_dlr_level') and
self.submit_sm.params['registered_delivery'] != RegisteredDelivery(
RegisteredDeliveryReceipt.NO_SMSC_DELIVERY_RECEIPT_REQUESTED)):
raise AuthorizationError(
'Authorization failed for username [%s] (Setting dlr level is not authorized).' % self.user)
if (not self.user.mt_credential.getAuthorization('set_source_address') and
len(self.submit_sm.params['source_addr']) > 0):
raise AuthorizationError(
'Authorization failed for username [%s] (Setting source address is not authorized).' % self.user)
if (not self.user.mt_credential.getAuthorization('set_priority') and
str(self.submit_sm.params['priority_flag']) != priority_flag_value_map[0]):
raise AuthorizationError(
'Authorization failed for username [%s] (Setting priority is not authorized).' % self.user)
def _checkSendFilters(self):
"""MT Filters check"""
if (self.user.mt_credential.getValueFilter('destination_address') is None or
not self.user.mt_credential.getValueFilter('destination_address').match(
self.submit_sm.params['destination_addr'])):
raise FilterError(
'Value filter failed for username [%s] (destination_address filter mismatch).' % self.user,
'destination_address')
if (self.user.mt_credential.getValueFilter('source_address') is None or
not self.user.mt_credential.getValueFilter('source_address').match(
self.submit_sm.params['source_addr'])):
raise FilterError(
'Value filter failed for username [%s] (source_address filter mismatch).' % self.user,
'source_address')
if (self.user.mt_credential.getValueFilter('priority') is None or
not self.user.mt_credential.getValueFilter('priority').match(
str(self.submit_sm.params['priority_flag'].index))):
raise FilterError(
'Value filter failed for username [%s] (priority filter mismatch).' % self.user,
'priority')
if (self.user.mt_credential.getValueFilter('content') is None or
not self.user.mt_credential.getValueFilter('content').match(self.submit_sm.params['short_message'])):
raise FilterError(
'Value filter failed for username [%s] (content filter mismatch).' % self.user,
'content')
def updatePDUWithUserDefaults(self, PDU):
"""Will update SubmitSmPDU.params from User credential defaults whenever a
SubmitSmPDU.params item is None"""
if (self.user.mt_credential.getDefaultValue('source_address') is not None and
(PDU.params['source_addr'] is None or len(PDU.params['source_addr']) == 0)):
PDU.params['source_addr'] = self.user.mt_credential.getDefaultValue('source_address')
return PDU
def validate(self):
"""Will validate requests through Authorizations and ValueFilters credential check"""
if self.action == 'Send':
self._checkSendAuthorizations()
self._checkSendFilters()
else:
raise CredentialValidationError('Unknown action [%s].' % self.action)
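# Example usage (a sketch; `user` and `submit_sm` are assumed to be supplied by the
# surrounding jasmin SMPP server code):
#
#   validator = SmppsCredentialValidator('Send', user, submit_sm)
#   validator.validate()
#   submit_sm = validator.updatePDUWithUserDefaults(submit_sm)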
|
the-stack_106_25045 | """
Loggers help keep track of the workings of your evolutionary algorithm. By
default, each Population is initialized with a BaseLogger, which you can use
by using the .log() method of the population. If you want more complex
behaviour, you can supply another logger to the Population on initialisation.
"""
import datetime as dt
import os
import json
import logging
import sys
import uuid
from evol.exceptions import PopulationIsNotEvaluatedException
from evol.population import BasePopulation
class BaseLogger:
"""
The `evol.BaseLogger` is the most basic logger in evol.
You can supply it to a population so that the population
knows how to handle the `.log()` verb.
"""
def __init__(self, target=None, stdout=False, fmt='%(asctime)s,%(message)s'):
self.file = target
if target is not None:
if not os.path.exists(os.path.split(target)[0]):
raise RuntimeError(f"path to target {os.path.split(target)[0]} does not exist!")
formatter = logging.Formatter(fmt=fmt, datefmt='%Y-%m-%d %H:%M:%S')
self.logger = logging.getLogger(name=f"{uuid.uuid4()}")
if not self.logger.handlers:
# we do this extra step because loggers can behave in strange ways otherwise
# https://navaspot.wordpress.com/2015/09/22/same-log-messages-multiple-times-in-python-issue/
if target:
file_handler = logging.FileHandler(filename=target)
file_handler.setFormatter(fmt=formatter)
self.logger.addHandler(file_handler)
if stdout:
stream_handler = logging.StreamHandler(stream=sys.stdout)
stream_handler.setFormatter(fmt=formatter)
self.logger.addHandler(stream_handler)
self.logger.setLevel(level=logging.INFO)
@staticmethod
def check_population(population: BasePopulation) -> None:
if not population.is_evaluated:
raise PopulationIsNotEvaluatedException('Population must be evaluated when logging.')
def log(self, population, **kwargs):
"""
The logger method of the Logger object determines what will be logged.
:param population: `evol.Population` object
:return: nothing, it merely logs to a file and perhaps stdout
"""
self.check_population(population)
values = ','.join([str(item) for item in kwargs.values()])
if values != '':
values = f',{values}'
for i in population:
self.logger.info(f'{population.id},{i.id},{i.fitness}' + values)
class SummaryLogger(BaseLogger):
"""
The `evol.SummaryLogger` merely logs statistics per population and nothing else.
You are still able to log to stdout as well.
"""
def log(self, population, **kwargs):
self.check_population(population)
values = ','.join([str(item) for item in kwargs.values()])
if values != '':
values = f',{values}'
fitnesses = [i.fitness for i in population]
self.logger.info(f'{min(fitnesses)},{sum(fitnesses) / len(fitnesses)},{max(fitnesses)}' + values)
class MultiLogger:
"""
    The `evol.MultiLogger` is a logger object that can handle writing to two files.
It is here for demonstration purposes to show how you could customize the logging.
The only thing that matters is that all logging is handled by the `.log()`
call. So we are free to record to multiple files if we want as well. This is
not per se best practice but it would work.
"""
def __init__(self, file_individuals, file_population):
self.file_individuals = file_individuals
self.file_population = file_population
def log(self, population, **kwargs):
"""
The logger method of the Logger object determines what will be logged.
:param population: population to log
:return: generator of strings to be handled
"""
ind_generator = (f'{dt.datetime.now()},{population.id},{i.id},{i.fitness}' for i in population)
fitnesses = [i.fitness for i in population]
data = {
'ts': str(dt.datetime.now()),
'mean_ind': sum(fitnesses) / len(fitnesses),
'min_ind': min(fitnesses),
'max_ind': max(fitnesses)
}
dict_to_log = {**kwargs, **data}
self.handle(ind_generator, dict_to_log)
def handle(self, ind_generator, dict_to_log):
"""
The handler method of the Logger object determines how it will be logged.
In this case we print if there is no file and we append to a file otherwise.
"""
with open(self.file_population, 'a') as f:
f.write(json.dumps(dict_to_log))
        with open(self.file_individuals, 'a') as f:
f.writelines(ind_generator)
|
the-stack_106_25047 | import cv2 as cv
import numpy as np
import os
from time import time, sleep
from windowcapture import WindowCapture
from vision import Vision
from hsvfilter import HsvFilter, grab_object_preset
from actions import Actions, Movement_Handler
# Allow 3 seconds to open the game window
sleep(3)
# Change the working directory to the folder this script is in.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Grab the gamename from the text file
with open("gamename.txt") as f:
gamename = f.readline()
# Initialise the actions object
# actions = Actions(test_mode=True)
movement = Movement_Handler(test_mode=False)
# The next block of code is for detecting the object in question
object_filter, object_custom_rect = grab_object_preset(
object_name="other_player_map_loc")
# WindowCapture.list_window_names()
# This is only for testing and fixing the 150% screen scaling I have
# object_custom_rect = list(map(lambda x: int(x*1.5), object_custom_rect))
# initialize the WindowCapture class for object detection
object_wincap = WindowCapture(
gamename, custom_rect=object_custom_rect)
# initialize the Vision class
object_vision = Vision('otherplayer.jpg')
# initialize the trackbar window
# object_vision.init_control_gui()
# This next block of code is for detecting the current player position
# It uses the same part of the screen as the other player bit above
# So no need to grab another wincap object
player_filter, player_custom_rect = grab_object_preset(
object_name="player_map_loc")
player_vision = Vision('playerv2.jpg')
player_vision_inverted = Vision("playerv2_inv.jpg")
# This block of code is for detecting if in a dungeon or not
dunchk_filter, dunchk_custom_rect = grab_object_preset(
object_name="dungeon_check")
# This is only for testing and fixing the 150% screen scaling I have
# dunchk_custom_rect = list(map(lambda x: int(x*1.5), dunchk_custom_rect))
dunchk_wincap = WindowCapture(
gamename, custom_rect=dunchk_custom_rect)
dunchk_vision = Vision('dunchk_67.jpg')
# Start the movement bot
movement.movement_start()
loop_time = time()
while(True):
# get an updated image of the game at specified area
dunchk_screenshot = dunchk_wincap.get_screenshot()
# pre-process the image to help with detection
dunchk_output_image = dunchk_vision.apply_hsv_filter(
dunchk_screenshot, dunchk_filter)
# do object detection, this time grab the points
dunchk_rectangles = dunchk_vision.find(
dunchk_output_image, threshold=0.27, epsilon=0.5)
# then if currently in a dungeon search for object
if len(dunchk_rectangles) == 1:
# get an updated image of the game at map loc
screenshot = object_wincap.get_screenshot()
# then try to detect the other player
output_image = object_vision.apply_hsv_filter(
screenshot, object_filter)
# filter_image = output_image.copy()
# do object detection, this time grab the points
rectangles = object_vision.find(
output_image, threshold=0.41, epsilon=0.5)
# draw the detection results onto the original image
points = object_vision.get_click_points(rectangles)
if len(points) == 1:
output_image = object_vision.draw_crosshairs(screenshot, points)
# If there is only one value found
# i.e. no false positives and players are not on top of each other
# Then figure out keypresses required to move towards other player
# And then implement
# print("Other player is located relatively x={} y={}".format(
# points[0][0]-131, 107-points[0][1]))
# Then grab the current player position and feed it in as coords
player_image = player_vision.apply_hsv_filter(
screenshot, player_filter)
player_rectangles = player_vision.find(
player_image, threshold=0.41, epsilon=0.5)
player_points = player_vision.get_click_points(player_rectangles)
if len(player_points) == 1:
output_image = object_vision.draw_crosshairs(
output_image, player_points)
relx = points[0][0]-player_points[0][0]
rely = player_points[0][1]-points[0][1]
#actions.move_direction(relx, rely)
movement.movement_update_xy(relx, rely)
# print("found player facing down")
# sleep(0.1)
else:
# Check to see if the image is just inverted
# Seem to have trouble if it is upside down
# Probably a function that can solve this problem somewhere
inverted_player_image = player_vision.apply_hsv_filter(
screenshot, player_filter)
inverted_player_rectangles = player_vision.find(
inverted_player_image, threshold=0.41, epsilon=0.5)
inverted_player_points = player_vision.get_click_points(
inverted_player_rectangles)
if len(inverted_player_points) == 1:
output_image = object_vision.draw_crosshairs(
output_image, inverted_player_points)
relx = points[0][0]-inverted_player_points[0][0]
rely = inverted_player_points[0][1]-points[0][1]
movement.movement_update_xy(relx, rely)
# print("found player facing up")
else:
movement.movement_update_xy(0, 0)
else:
# Clear all keypresses
# print("Can't detect other player, stopping movement")
# actions.stop_keypresses(movement_only=True)
movement.movement_update_xy(0, 0)
# sleep(0.25)
# display the processed image
cv.imshow('Matches', screenshot)
# cv.imshow('Filtered', filter_image)
else:
# print("Not in dungeon, slowing refresh rate")
# actions.stop_keypresses(movement_only=True)
movement.movement_update_xy(0, 0)
sleep(0.5)
cv.imshow("Dunchk", dunchk_output_image)
# debug the loop rate
print('FPS {}'.format(1 / (time() - loop_time)))
loop_time = time()
# press 'q' with the output window focused to exit.
# waits 1 ms every loop to process key presses
if cv.waitKey(1) == ord('q'):
cv.destroyAllWindows()
movement.movement_stop()
break
print('Done.')
|
the-stack_106_25049 | from typing import Dict, List
import streamlit as st
def get_case_default(features: List[str]) -> int:
"""
Heuristic for selecting a sound attribute as default for case id
:param features: list of available features
:return: index of the suggested feature in the list
"""
for idx, f in enumerate(features):
if f.lower().endswith('id'):
return idx
return 0
def get_activity_default(features: List[str]) -> int:
"""
Heuristic for selecting a sound attribute as default for activity
:param features: list of available features
:return: index of the suggested feature in the list
"""
for idx, f in enumerate(features):
if 'activity' in f.lower():
return idx
return 0
def get_starttime_default(features: List[str]) -> int:
"""
Heuristic for selecting a sound attribute as default for start time
:param features: list of available features
:return: index of the suggested feature in the list
"""
for idx, f in enumerate(features):
if 'start' in f.lower():
return idx
return 0
def get_duration_default(features: List[str]) -> int:
for idx, f in enumerate(features):
if 'duration' in f.lower():
return idx
return 0
def show(features: List[str], in_sidebar=True) -> Dict:
s = st.sidebar if in_sidebar else st
s.subheader("Map features to attributes")
unmapped = features.copy()
mapped = []
case_attr = s.selectbox("Case:", unmapped, get_case_default(unmapped))
mapped.append(case_attr)
unmapped = [f for f in unmapped if f not in mapped]
activity_attr = s.selectbox("Activity:", unmapped, get_activity_default(unmapped))
# mapped += activity_attr
mapped.append(activity_attr)
unmapped = [f for f in unmapped if f not in mapped]
starttime_attr = s.selectbox("Starttime:", unmapped, get_starttime_default(unmapped))
mapped.append(starttime_attr)
unmapped = [f for f in unmapped if f not in mapped]
duration_attr = s.selectbox("Duration:", unmapped, get_duration_default(unmapped))
mapped.append(duration_attr)
unmapped = [f for f in unmapped if f not in mapped]
res_attrs = s.multiselect("Resources:", unmapped)
mapped += res_attrs
unmapped = [f for f in unmapped if f not in mapped]
return {
"case_id_attr": case_attr,
"activity_attr": activity_attr,
"timestamp_attr": starttime_attr,
"duration_attr": duration_attr,
"resource_attrs": res_attrs
}
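# Example usage (a sketch; `df` is a hypothetical pandas DataFrame loaded elsewhere
# in the Streamlit app):
#
#   attr_mapping = show(list(df.columns), in_sidebar=True)
#   st.write("Case id column:", attr_mapping["case_id_attr"])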
|
the-stack_106_25050 | import os
import boto3
from common import AWSServiceCollector, AWS_REGIONS_SET
sns = boto3.client('sns')
sts = boto3.client('sts')
class ElasticacheCollector(AWSServiceCollector):
boto3_service_name = 'elasticache'
def _collect_assets(self):
max_records = 100
marker = ''
while True:
response = self.client.describe_cache_clusters(
MaxRecords=max_records,
Marker=marker,
)
clusters = response['CacheClusters']
for cluster in clusters:
# get cache nodes' endpoints:
                # 'CacheNodes' is only returned when ShowCacheNodeInfo is set; guard
                # against a missing key so we never iterate over None.
                cluster_nodes = cluster.get('CacheNodes') or []
                for node in cluster_nodes:
endpoint_domain = node['Endpoint']['Address']
self.data_store_endpoints.add(endpoint_domain)
# get configuration endpoint, if any (for Memcached clusters):
configuration_endpoint = cluster.get('ConfigurationEndpoint')
if configuration_endpoint:
endpoint_domain = configuration_endpoint['Address']
self.data_store_endpoints.add(endpoint_domain)
# check if more pages of results are to be fetched:
marker = response.get('Marker')
if marker is None:
break
def handler_fan_out(event, context):
"""
Publishes an SNS message for each region from which the assets are to be
collected.
"""
elasticache_regions = AWS_REGIONS_SET
for region in elasticache_regions:
sns.publish(
TopicArn=os.environ['SNSTopicCollectAWSElasticacheARN'],
Message=region,
)
def handler_regional(event, context):
region = event['Records'][0]['Sns']['Message']
response = sts.assume_role(
RoleArn=os.environ['AWSIAMRoleARN'],
RoleSessionName='CloudFrontierAssetCollector',
# ExternalId='...',
)
print(f'Assumed IAM role')
credentials = response['Credentials']
client_session = boto3.Session(
aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['SessionToken'],
region_name=region,
)
print(f'Created session')
ElasticacheCollector(client_session).collect()
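# Hedged local-invocation sketch (illustrative only): handler_regional expects an
# SNS-shaped event whose Message field carries the region name, e.g.
#
#     fake_event = {'Records': [{'Sns': {'Message': 'eu-west-1'}}]}
#     handler_regional(fake_event, context=None)
#
# A real run additionally needs the AWSIAMRoleARN / SNSTopicCollectAWSElasticacheARN
# environment variables and credentials allowed to assume the collector role.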
|
the-stack_106_25051 |
from util.enums import ModelAttribute
from util.trainer import load_model
import yaml
import argparse
import numpy as np
import torch
from fine_tune_vae.models import *
from fine_tune_vae.experiment import VAEXperiment
import torch.backends.cudnn as cudnn
from pytorch_lightning import Trainer
from pytorch_lightning.logging import TestTubeLogger
def train_vae():
parser = argparse.ArgumentParser(description='Generic runner for VAE models')
parser.add_argument('--config', '-c',
dest="filename",
metavar='FILE',
help = 'path to the config file',
default='fine_tune_vae/configs/vae.yaml')
parser.add_argument('--epoch',
type=int,
default=-1)
# default=336)
parser.add_argument('--network_pkl',
default="training_runs/00074-SNGAN-0.100-0.100000-/checkpoint")
# default="training_runs/00045-SNGAN-0.0-0.000-/checkpoint")
parser.add_argument('--resume',
default="y")
parser.add_argument('--evaluate_interval', type=int,
default=15) #5
parser.add_argument('--fine_tune_module',
default="d_and_e") #e, g_d_e
parser.add_argument('--model_type',
default="SNGAN_VAE")
args = parser.parse_args()
with open(args.filename, 'r') as file:
try:
config = yaml.safe_load(file)
except yaml.YAMLError as exc:
print(exc)
tt_logger = TestTubeLogger(
save_dir=config['logging_params']['save_dir'],
name=config['logging_params']['name'],
debug=False,
create_git_tag=False,
)
# For reproducibility
torch.manual_seed(config['logging_params']['manual_seed'])
np.random.seed(config['logging_params']['manual_seed'])
cudnn.deterministic = True
cudnn.benchmark = False
model=load_vae_model(args,config)
    experiment = VAEXperiment(model,
                              config['exp_params'],
                              config['model_params']['latent_dim'],
                              args.evaluate_interval,
                              args.fine_tune_module)
runner = Trainer(default_root_dir=f"{tt_logger.save_dir}",
min_epochs =1,
logger=tt_logger,
log_save_interval=100,
train_percent_check=1.,
val_percent_check=1.,
num_sanity_val_steps=5,
early_stop_callback = False,
**config['trainer_params'])
print(f"======= Training {config['model_params']['name']} =======")
runner.fit(experiment)
def load_vae_model(args,config):
if config['model_params']['name']=="VaeGan":
model_attribute=ModelAttribute[args.model_type]
        generator, discriminator, model = load_model(
            config['model_params']['latent_dim'], "dcgan", model_attribute,
            0, 0, config['exp_params']['batch_size'], None)
if args.resume=="y":
if args.epoch>=0:
gen_pkl=args.network_pkl+"/gen_"+str(args.epoch)
dis_pkl=args.network_pkl+"/disc_"+str(args.epoch)
else:
gen_pkl=args.network_pkl+"/gen"
dis_pkl=args.network_pkl+"/disc"
generator.load_state_dict(torch.load(gen_pkl),strict=False)
discriminator.load_state_dict(torch.load(dis_pkl),strict=False)
if args.fine_tune_module=="d_and_e":
generator.requires_grad_(False)
elif args.fine_tune_module=="e":
generator.requires_grad_(False)
for name ,child in (discriminator.named_children()):
if name!='fc_mu' and name!='fc_var' :
child.requires_grad_(False)
else:
model = vae_models[config['model_params']['name']](**config['model_params'])
return model
train_vae() |
the-stack_106_25052 | from kg.checkers import * ### @import
@chk.get_one_input
def get_one_input(file, **kwargs):
n = int(next(file))
a = list(map(int, next(file).strip().split()))
ensure(len(a) == n, "Invalid length in input", exc=Fail)
return a
@chk.get_output_for_input
@chk.get_judge_data_for_input
def get_output_for_input(file, a, **kwargs):
exc = kwargs['exc']
try:
m = int(next(file).rstrip())
b = list(map(int, next(file).rstrip().split(' ')))
except Exception as e:
raise exc("Failed to get a sequence: " + str(e)) from e
ensure(m >= 0, "Invalid length", exc=exc)
ensure(len(b) == m, lambda: exc(f"Expected {m} numbers but got {len(b)}"))
return b
def check_valid(a, b, exc=Exception):
# check subsequence
j = 0
for i in range(len(a)):
if j < len(b) and a[i] == b[j]: j += 1
ensure(j == len(b), "Not a subsequence!", exc=exc)
# check distinct
ensure(len(b) == len(set(b)), "Values not unique!", exc=exc)
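# Worked example (illustrative, not part of the checker): for a = [1, 2, 3, 2] and
# b = [1, 3], the two-pointer scan above advances j at a[0] and a[2], so j == len(b)
# and b passes; b = [3, 1] would end with j == 1 and raise "Not a subsequence!".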
@set_single_checker(no_extra_chars=True)
def check_solution(a, cont_b, judge_b, **kwargs):
check_valid(a, cont_b, exc=WA)
check_valid(a, judge_b, exc=Fail) # remove for speed
if len(cont_b) < len(judge_b): raise WA("Suboptimal solution")
if len(cont_b) > len(judge_b): raise Fail("Judge data incorrect!")
return 1.0
if __name__ == '__main__': chk(title="Split")
|
the-stack_106_25054 | # !/usr/bin/env python3
# -*- coding:utf-8 -*-
# @author: Shengjia Yan
# 2020-10-14 Wednesday
# @email: [email protected]
# Copyright @ Shengjia Yan. All Rights Reserved.
# Process lingspam corpus to generate trainset and testset in csv files.
import os
import csv
import codecs
import string
TRAINSET_PATH = '../data/train/'
TESTSET_PATH = '../data/test/'
LINGSPAM_TRAIN_CSV_PATH = TRAINSET_PATH + 'lingspam_train.csv'
LINGSPAM_TEST_CSV_PATH = TESTSET_PATH + 'lingspam_test.csv'
def generate_trainset(input_dir, output_path):
l = []
for root, dirs, files in os.walk(input_dir):
path = root.split(os.sep)
part_name = os.path.basename(root)
for file in files:
if not file.endswith('.txt'):
continue
d = {}
file_name = file.replace('.txt', '')
file_path = os.path.join(root, file)
with codecs.open(file_path, mode='r', encoding='utf8', errors='ignore') as f:
line_counter = 0
for line in f.readlines():
line = line.strip()
if line_counter == 0: # subject
subject = line.replace('Subject:', '').strip()
if line_counter == 2:
email = line
# email = [word for word in email if word not in string.punctuation]
# email = [word for word in email if len(word) > 1]
line_counter += 1
d['email_subject'] = subject
d['email_body'] = email
d['part_name'] = part_name
d['file_name'] = file_name
d['is_spam'] = 1 if file_name.startswith('spmsg') else 0
l.append(d)
with codecs.open(output_path, mode='w', encoding='utf8', errors='ignore') as out_file:
writer = csv.DictWriter(out_file, l[0].keys())
writer.writeheader()
for row in l:
writer.writerow(row)
if __name__ == "__main__":
generate_trainset(TRAINSET_PATH, LINGSPAM_TRAIN_CSV_PATH)
generate_trainset(TESTSET_PATH, LINGSPAM_TEST_CSV_PATH)
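# Illustrative read-back sketch (added, not part of the original script): each CSV has
# the columns email_subject, email_body, part_name, file_name and is_spam, so the class
# balance can be inspected with pandas, e.g.:
#
#     import pandas as pd
#     train_df = pd.read_csv(LINGSPAM_TRAIN_CSV_PATH)
#     print(train_df['is_spam'].value_counts())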
|
the-stack_106_25057 | import numpy as np
from gym.spaces import Discrete, Box, MultiDiscrete
from ..utils import instantiate
class PowerElectronicConverter:
"""
Base class for all converters in a SCMLSystem.
Properties:
| *voltages(tuple(float, float))*: Determines which output voltage polarities the converter can generate.
| E.g. (0, 1) - Only positive voltages / (-1, 1) Positive and negative voltages
| *currents(tuple(float, float))*: Determines which output current polarities the converter can generate.
| E.g. (0, 1) - Only positive currents / (-1, 1) Positive and negative currents
"""
#: gym.Space that defines Minimum, Maximum and Dimension of possible output voltage of the converter
voltages = None
#: gym.Space that defines Minimum, Maximum and Dimension of possible output current of the converter
currents = None
#: gym.Space that defines the set of all possible actions for the converter
action_space = None
#: Default action that is taken after a reset.
_reset_action = None
def __init__(self, tau, dead_time=False, interlocking_time=0.0):
"""
:param tau: Discrete time step of the system in seconds
:param dead_time: Flag, if a system dead_time of one cycle should be considered.
:param interlocking_time: Interlocking time of the transistors in seconds
"""
self._tau = tau
self._dead_time = dead_time
self._dead_time_action = self._reset_action
self._current_action = self._reset_action
self._interlocking_time = interlocking_time
self._action_start_time = 0.0
def reset(self):
"""
Reset all converter states to a default.
Returns:
list(float): A default output voltage after reset(=0V).
"""
self._dead_time_action = self._reset_action
self._current_action = self._reset_action
self._action_start_time = 0.0
return [0.0]
def set_action(self, action, t):
"""
Set the next action of the converter at the beginning of a simulation step in the system.
Args:
action(element of action_space): The control action on the converter.
t(float): Time at the beginning of the simulation step in seconds.
Returns:
list(float): Times when a switching action occurs and the conversion function must be called by the system.
"""
if self._dead_time:
self._current_action = self._dead_time_action
self._dead_time_action = action
else:
self._current_action = action
self._action_start_time = t
return self._set_switching_pattern()
def i_sup(self, i_out):
"""
Calculate the current, the converter takes from the supply for the given output currents and the current
switching state.
Args:
i_out(list(float)): All currents flowing out of the converter and into the motor.
Returns:
float: The current drawn from the supply.
"""
raise NotImplementedError
def convert(self, i_out, t):
"""
The conversion function that converts the previously set action to an input voltage for the motor.
This function has to be called at least at every previously defined switching time, because the input voltage
for the motor might change at these times.
Args:
i_out(list(float)): All currents that flow out of the converter into the motor.
t(float): Current time of the system.
Returns:
list(float): List of all input voltages at the motor.
"""
raise NotImplementedError
def _set_switching_pattern(self):
"""
Method to calculate the switching pattern and corresponding switching times for the next time step.
At least, the next time step [t + tau] is returned.
Returns:
list(float): Switching times.
"""
self._switching_pattern = [self._current_action]
return [self._action_start_time + self._tau]
class NoConverter(PowerElectronicConverter):
"""Dummy Converter class used to directly transfer the supply voltage to the motor"""
# Dummy default values for voltages and currents.
# No real use other than to fit the current physical system architecture
voltages = Box(0, 1, shape=(3,), dtype=np.float64)
currents = Box(0, 1, shape=(3,), dtype=np.float64)
action_space = Box(low=np.array([]), high=np.array([]), dtype=np.float64)
def i_sup(self, i_out):
return i_out[0]
def convert(self, i_out, t):
return [1]
class ContDynamicallyAveragedConverter(PowerElectronicConverter):
"""
Base class for all continuously controlled converters that calculate the input voltages to the motor with a
dynamically averaged model over one time step.
This class also implements the interlocking time of the transistors as a discount on the output voltage.
"""
_reset_action = [0]
def __init__(self, tau=1e-4, **kwargs):
# Docstring in base class
super().__init__(tau=tau, **kwargs)
def set_action(self, action, t):
# Docstring in base class
return super().set_action(min(max(action, self.action_space.low), self.action_space.high), t)
def convert(self, i_out, t):
# Docstring in base class
return [min(max(self._convert(i_out, t) - self._interlock(i_out, t), self.voltages.low[0]), self.voltages.high[0])]
def _convert(self, i_in, t):
"""
Calculate an idealized output voltage for the current active action neglecting interlocking times.
Args:
i_in(list(float)): Input currents of the motor
t(float): Time of the system
Returns:
float: Idealized output voltage neglecting interlocking times
"""
raise NotImplementedError
def i_sup(self, i_out):
# Docstring in base class
raise NotImplementedError
def _interlock(self, i_in, *_):
"""
Calculate the output voltage discount due to the interlocking time of the transistors
Args:
i_in(list(float)): list of all currents flowing into the motor.
"""
return np.sign(i_in[0]) / self._tau * self._interlocking_time
class FiniteConverter(PowerElectronicConverter):
"""
Base class for all finite converters.
"""
#: The switching states of the converter for the current action
_switching_pattern = []
#: The current switching state of the converter
_switching_state = 0
#: The action that is the default after reset
_reset_action = 0
def __init__(self, tau=1e-5, **kwargs):
# Docstring in base class
super().__init__(tau=tau, **kwargs)
def set_action(self, action, t):
assert self.action_space.contains(action), \
f"The selected action {action} is not a valid element of the action space {self.action_space}."
return super().set_action(action, t)
def convert(self, i_out, t):
# Docstring in base class
raise NotImplementedError
def i_sup(self, i_out):
# Docstring in base class
raise NotImplementedError
class FiniteOneQuadrantConverter(FiniteConverter):
"""
Key:
'Finite-1QC'
Switching States / Actions:
| 0: Transistor off.
| 1: Transistor on.
Action Space:
Discrete(2)
Output Voltages and Currents:
| voltages: Box(0, 1, shape=(1,))
| currents: Box(0, 1, shape=(1,))
"""
voltages = Box(0, 1, shape=(1,), dtype=np.float64)
currents = Box(0, 1, shape=(1,), dtype=np.float64)
action_space = Discrete(2)
def convert(self, i_out, t):
# Docstring in base class
return [self._current_action if i_out[0] >= 0 else 1]
def i_sup(self, i_out):
# Docstring in base class
return i_out[0] if self._current_action == 1 else 0
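# Hedged usage sketch (illustrative, not part of the library): the finite converters in
# this module share the protocol reset -> set_action -> convert -> i_sup, e.g.
#
#     conv = FiniteOneQuadrantConverter(tau=1e-5)
#     conv.reset()
#     switch_times = conv.set_action(1, t=0.0)   # close the transistor
#     u = conv.convert([0.5], t=0.0)             # -> [1]: full supply voltage applied
#     i_dc = conv.i_sup([0.5])                   # -> 0.5: output current drawn from the supply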
class FiniteTwoQuadrantConverter(FiniteConverter):
"""
Key:
'Finite-2QC'
Switching States / Actions:
| 0: Both Transistors off.
| 1: Upper Transistor on.
| 2: Lower Transistor on.
Action Space:
Discrete(3)
Output Voltages and Currents:
| voltages: Box(0, 1, shape=(1,))
| currents: Box(-1, 1, shape=(1,))
"""
voltages = Box(0, 1, shape=(1,), dtype=np.float64)
currents = Box(-1, 1, shape=(1,), dtype=np.float64)
action_space = Discrete(3)
def convert(self, i_out, t):
# Docstring in base class
# Converter switches slightly (tau / 1000 seconds) before interlocking time due to inaccuracy of the solvers.
if t - self._tau / 1000 > self._action_start_time + self._interlocking_time:
self._switching_state = self._switching_pattern[-1]
else:
self._switching_state = self._switching_pattern[0]
if self._switching_state == 0:
if i_out[0] < 0:
return [1]
elif i_out[0] >= 0:
return [0.0]
elif self._switching_state == 1:
return [1]
elif self._switching_state == 2:
return [0.0]
else:
raise Exception('Invalid switching state of the converter')
def i_sup(self, i_out):
# Docstring in base class
if self._switching_state == 0:
return i_out[0] if i_out[0] < 0 else 0
elif self._switching_state == 1:
return i_out[0]
elif self._switching_state == 2:
return 0
else:
raise Exception('Invalid switching state of the converter')
def _set_switching_pattern(self):
# Docstring in base class
if (
self._current_action == 0
or self._switching_state == 0
or self._current_action == self._switching_state
or self._interlocking_time == 0
):
self._switching_pattern = [self._current_action]
return [self._action_start_time + self._tau]
else:
self._switching_pattern = [0, self._current_action]
return [self._action_start_time + self._interlocking_time, self._action_start_time + self._tau]
class FiniteFourQuadrantConverter(FiniteConverter):
"""
Key:
'Finite-4QC'
Switching States / Actions:
| 0: T2, T4 on.
| 1: T1, T4 on.
| 2: T2, T3 on.
| 3: T1, T3 on.
Action Space:
Discrete(4)
Output Voltages and Currents:
| Box(-1, 1, shape=(1,))
| Box(-1, 1, shape=(1,))
"""
voltages = Box(-1, 1, shape=(1,), dtype=np.float64)
currents = Box(-1, 1, shape=(1,), dtype=np.float64)
action_space = Discrete(4)
def __init__(self, **kwargs):
# Docstring in base class
super().__init__(**kwargs)
self._subconverters = [FiniteTwoQuadrantConverter(**kwargs), FiniteTwoQuadrantConverter(**kwargs)]
def reset(self):
# Docstring in base class
self._subconverters[0].reset()
self._subconverters[1].reset()
return super().reset()
def convert(self, i_out, t):
# Docstring in base class
return [self._subconverters[0].convert(i_out, t)[0] - self._subconverters[1].convert([-i_out[0]], t)[0]]
def set_action(self, action, t):
# Docstring in base class
assert self.action_space.contains(action), \
f"The selected action {action} is not a valid element of the action space {self.action_space}."
times = []
action0 = [1, 1, 2, 2][action]
action1 = [1, 2, 1, 2][action]
times += self._subconverters[0].set_action(action0, t)
times += self._subconverters[1].set_action(action1, t)
return sorted(list(set(times)))
def i_sup(self, i_out):
# Docstring in base class
return self._subconverters[0].i_sup(i_out) + self._subconverters[1].i_sup([-i_out[0]])
class ContOneQuadrantConverter(ContDynamicallyAveragedConverter):
"""
Key:
'Cont-1QC'
Action:
Duty Cycle of the Transistor in [0,1].
Action Space:
Box([0,1])
Output Voltages and Currents:
| voltages: Box(0, 1, shape=(1,))
| currents: Box(0, 1, shape=(1,))
"""
voltages = Box(0, 1, shape=(1,), dtype=np.float64)
currents = Box(0, 1, shape=(1,), dtype=np.float64)
action_space = Box(0, 1, shape=(1,), dtype=np.float64)
def _convert(self, i_in, *_):
# Docstring in base class
return self._current_action[0] if i_in[0] >= 0 else 1
def _interlock(self, *_):
# Docstring in base class
return 0
def i_sup(self, i_out):
# Docstring in base class
return self._current_action[0] * i_out[0]
class ContTwoQuadrantConverter(ContDynamicallyAveragedConverter):
"""
Key:
'Cont-2QC'
Actions:
| Duty Cycle upper Transistor: Action
        | Duty Cycle lower Transistor: 1 - Action
Action Space:
Box([0,1])
Output Voltages and Currents:
| voltages: Box(0, 1, shape=(1,))
| currents: Box(-1, 1, shape=(1,))
"""
voltages = Box(0, 1, shape=(1,), dtype=np.float64)
currents = Box(-1, 1, shape=(1,), dtype=np.float64)
action_space = Box(0, 1, shape=(1,), dtype=np.float64)
def _convert(self, *_):
# Docstring in base class
return self._current_action[0]
def i_sup(self, i_out):
# Docstring in base class
interlocking_current = 1 if i_out[0] < 0 else 0
return (
self._current_action[0]
+ self._interlocking_time / self._tau * (interlocking_current - self._current_action[0])
) * i_out[0]
class ContFourQuadrantConverter(ContDynamicallyAveragedConverter):
"""
The continuous four quadrant converter (4QC) is simulated with two continuous 2QC.
Key:
'Cont-4QC'
Actions:
| Duty Cycle Transistor T1: 0.5 * (Action + 1)
| Duty Cycle Transistor T2: 1 - 0.5 * (Action + 1)
| Duty Cycle Transistor T3: 1 - 0.5 * (Action + 1)
| Duty Cycle Transistor T4: 0.5 * (Action + 1)
Action Space:
Box(-1, 1, shape=(1,))
Output Voltages and Currents:
| voltages: Box(-1, 1, shape=(1,))
| currents: Box(-1, 1, shape=(1,))
"""
voltages = Box(-1, 1, shape=(1,), dtype=np.float64)
currents = Box(-1, 1, shape=(1,), dtype=np.float64)
action_space = Box(-1, 1, shape=(1,), dtype=np.float64)
def __init__(self, **kwargs):
# Docstring in base class
super().__init__(**kwargs)
self._subconverters = [ContTwoQuadrantConverter(**kwargs), ContTwoQuadrantConverter(**kwargs)]
def _convert(self, *_):
# Not used here
pass
def reset(self):
# Docstring in base class
self._subconverters[0].reset()
self._subconverters[1].reset()
return super().reset()
def convert(self, i_out, t):
# Docstring in base class
return [self._subconverters[0].convert(i_out, t)[0] - self._subconverters[1].convert(i_out, t)[0]]
def set_action(self, action, t):
# Docstring in base class
super().set_action(action, t)
times = []
times += self._subconverters[0].set_action([0.5 * (action[0] + 1)], t)
times += self._subconverters[1].set_action([-0.5 * (action[0] - 1)], t)
return sorted(list(set(times)))
def i_sup(self, i_out):
# Docstring in base class
return self._subconverters[0].i_sup(i_out) + self._subconverters[1].i_sup([-i_out[0]])
class FiniteMultiConverter(FiniteConverter):
"""
Converter that allows to include an arbitrary number of independent finite subconverters.
Subconverters must be 'elementary' and can not be MultiConverters.
Key:
'Finite-Multi'
Actions:
Concatenation of the subconverters' action spaces
Action Space:
MultiDiscrete([subconverter[0].action_space.n , subconverter[1].action_space.n, ...])
Output Voltage Space:
Box([subconverter[0].voltages.low, subconverter[1].voltages.low, ...],
[subconverter[0].voltages.high, subconverter[1].voltages.high, ...])
"""
@property
def subconverters(self):
return self._subconverters
def __init__(self, subconverters, **kwargs):
"""
Args:
subconverters(list(str/class/object): Subconverters to instantiate .
kwargs(dict): Parameters to pass to the Subconverters and the superclass
"""
super().__init__(**kwargs)
self._subconverters = [
instantiate(PowerElectronicConverter, subconverter, **kwargs) for subconverter in subconverters
]
self.subsignal_current_space_dims = []
self.subsignal_voltage_space_dims = []
self.action_space = []
currents_low = []
currents_high = []
voltages_low = []
voltages_high = []
# get the limits and space dims from each subconverter
for subconverter in self._subconverters:
self.subsignal_current_space_dims.append(np.squeeze(subconverter.currents.shape) or 1)
self.subsignal_voltage_space_dims.append(np.squeeze(subconverter.voltages.shape) or 1)
self.action_space.append(subconverter.action_space.n)
currents_low.append(subconverter.currents.low)
currents_high.append(subconverter.currents.high)
voltages_low.append(subconverter.voltages.low)
voltages_high.append(subconverter.voltages.high)
# convert to 1D list
self.subsignal_current_space_dims = np.array(self.subsignal_current_space_dims)
self.subsignal_voltage_space_dims = np.array(self.subsignal_voltage_space_dims)
currents_low = np.concatenate(currents_low)
currents_high = np.concatenate(currents_high)
voltages_low = np.concatenate(voltages_low)
voltages_high = np.concatenate(voltages_high)
# put limits into gym_space format
self.action_space = MultiDiscrete(self.action_space)
self.currents = Box(currents_low, currents_high, dtype=np.float64)
self.voltages = Box(voltages_low, voltages_high, dtype=np.float64)
def convert(self, i_out, t):
# Docstring in base class
u_in = []
subsignal_idx_low = 0
for subconverter, subsignal_space_size in zip(self._subconverters, self.subsignal_voltage_space_dims):
subsignal_idx_high = subsignal_idx_low + subsignal_space_size
u_in += subconverter.convert(i_out[subsignal_idx_low:subsignal_idx_high], t)
subsignal_idx_low = subsignal_idx_high
return u_in
def reset(self):
# Docstring in base class
u_in = []
for subconverter in self._subconverters:
u_in += subconverter.reset()
return u_in
def set_action(self, action, t):
# Docstring in base class
times = []
for subconverter, sub_action in zip(self._subconverters, action):
times += subconverter.set_action(sub_action, t)
return sorted(list(set(times)))
def i_sup(self, i_out):
# Docstring in base class
i_sup = 0
subsignal_idx_low = 0
for subconverter, subsignal_space_size in zip(self._subconverters, self.subsignal_current_space_dims):
subsignal_idx_high = subsignal_idx_low + subsignal_space_size
i_sup += subconverter.i_sup(i_out[subsignal_idx_low:subsignal_idx_high])
subsignal_idx_low = subsignal_idx_high
return i_sup
class ContMultiConverter(ContDynamicallyAveragedConverter):
"""
Converter that allows to include an arbitrary number of independent continuous subconverters.
Subconverters must be 'elementary' and can not be MultiConverters.
Key:
'Cont-Multi'
Actions:
Concatenation of the subconverters' action spaces
Action Space:
Box([subconverter[0].action_space.low, subconverter[1].action_space.low, ...],
[subconverter[0].action_space.high, subconverter[1].action_space.high, ...])
Output Voltage Space:
Box([subconverter[0].voltages.low, subconverter[1].voltages.low, ...],
[subconverter[0].voltages.high, subconverter[1].voltages.high, ...])
"""
def __init__(self, subconverters, **kwargs):
"""
Args:
subconverters(list(str/class/object): Subconverters to instantiate .
kwargs(dict): Parameters to pass to the Subconverters
"""
super().__init__(**kwargs)
self._subconverters = [instantiate(PowerElectronicConverter, subconverter, **kwargs) for subconverter in subconverters]
self.subsignal_current_space_dims = []
self.subsignal_voltage_space_dims = []
action_space_low = []
action_space_high = []
currents_low = []
currents_high = []
voltages_low = []
voltages_high = []
# get the limits and space dims from each subconverter
for subconverter in self._subconverters:
self.subsignal_current_space_dims.append(np.squeeze(subconverter.currents.shape) or 1)
self.subsignal_voltage_space_dims.append(np.squeeze(subconverter.voltages.shape) or 1)
action_space_low.append(subconverter.action_space.low)
action_space_high.append(subconverter.action_space.high)
currents_low.append(subconverter.currents.low)
currents_high.append(subconverter.currents.high)
voltages_low.append(subconverter.voltages.low)
voltages_high.append(subconverter.voltages.high)
# convert to 1D list
self.subsignal_current_space_dims = np.array(self.subsignal_current_space_dims)
self.subsignal_voltage_space_dims = np.array(self.subsignal_voltage_space_dims)
action_space_low = np.concatenate(action_space_low)
action_space_high = np.concatenate(action_space_high)
currents_low = np.concatenate(currents_low)
currents_high = np.concatenate(currents_high)
voltages_low = np.concatenate(voltages_low)
voltages_high = np.concatenate(voltages_high)
# put limits into gym_space format
self.action_space = Box(action_space_low, action_space_high, dtype=np.float64)
self.currents = Box(currents_low, currents_high, dtype=np.float64)
self.voltages = Box(voltages_low, voltages_high, dtype=np.float64)
def set_action(self, action, t):
# Docstring in base class
times = []
ind = 0
for subconverter in self._subconverters:
sub_action = action[ind:ind + subconverter.action_space.shape[0]]
ind += subconverter.action_space.shape[0]
times += subconverter.set_action(sub_action, t)
return sorted(list(set(times)))
def reset(self):
# Docstring in base class
u_in = []
for subconverter in self._subconverters:
u_in += subconverter.reset()
return u_in
def convert(self, i_out, t):
# Docstring in base class
u_in = []
subsignal_idx_low = 0
for subconverter, subsignal_space_size in zip(self._subconverters, self.subsignal_voltage_space_dims):
subsignal_idx_high = subsignal_idx_low + subsignal_space_size
u_in += subconverter.convert(i_out[subsignal_idx_low:subsignal_idx_high], t)
subsignal_idx_low = subsignal_idx_high
return u_in
def _convert(self, i_in, t):
# Not used
pass
def i_sup(self, i_out):
# Docstring in base class
i_sup = 0
subsignal_idx_low = 0
for subconverter, subsignal_space_size in zip(self._subconverters, self.subsignal_current_space_dims):
subsignal_idx_high = subsignal_idx_low + subsignal_space_size
i_sup += subconverter.i_sup(i_out[subsignal_idx_low:subsignal_idx_high])
subsignal_idx_low = subsignal_idx_high
return i_sup
class FiniteB6BridgeConverter(FiniteConverter):
"""
The finite B6 bridge converters (B6C) is simulated with three finite 2QC.
Key:
'Finite-B6C'
Actions:
+-+-----+-----+-----+
| |H_1 |H_2 |H_3 |
+=+=====+=====+=====+
|0|lower|lower|lower|
+-+-----+-----+-----+
|1|lower|lower|upper|
+-+-----+-----+-----+
|2|lower|upper|lower|
+-+-----+-----+-----+
|3|lower|upper|upper|
+-+-----+-----+-----+
|4|upper|lower|lower|
+-+-----+-----+-----+
|5|upper|lower|upper|
+-+-----+-----+-----+
|6|upper|upper|lower|
+-+-----+-----+-----+
|7|upper|upper|upper|
+-+-----+-----+-----+
Action Space:
Discrete(8)
Output Voltages and Currents:
| voltages: Box(-1,1, shape=(3,))
| currents: Box(-1,1, shape=(3,))
Output Voltage Space:
Box(-0.5, 0.5, shape=(3,))
"""
action_space = Discrete(8)
    # Positive and negative voltages are possible
voltages = Box(-1, 1, shape=(3,), dtype=np.float64)
# positive and negative currents are possible
currents = Box(-1, 1, shape=(3,), dtype=np.float64)
_reset_action = 0
_subactions = [
[2, 2, 2],
[2, 2, 1],
[2, 1, 2],
[2, 1, 1],
[1, 2, 2],
[1, 2, 1],
[1, 1, 2],
[1, 1, 1]
]
def __init__(self, tau=1e-5, **kwargs):
# Docstring in base class
super().__init__(tau=tau, **kwargs)
self._subconverters = [
FiniteTwoQuadrantConverter(tau=tau, **kwargs),
FiniteTwoQuadrantConverter(tau=tau, **kwargs),
FiniteTwoQuadrantConverter(tau=tau, **kwargs),
]
def reset(self):
# Docstring in base class
return [
self._subconverters[0].reset()[0] - 0.5,
self._subconverters[1].reset()[0] - 0.5,
self._subconverters[2].reset()[0] - 0.5,
]
def convert(self, i_out, t):
# Docstring in base class
u_out = [
self._subconverters[0].convert([i_out[0]], t)[0] - 0.5,
self._subconverters[1].convert([i_out[1]], t)[0] - 0.5,
self._subconverters[2].convert([i_out[2]], t)[0] - 0.5
]
return u_out
def set_action(self, action, t):
# Docstring in base class
assert self.action_space.contains(action), \
f"The selected action {action} is not a valid element of the action space {self.action_space}."
subactions = self._subactions[action]
times = []
times += self._subconverters[0].set_action(subactions[0], t)
times += self._subconverters[1].set_action(subactions[1], t)
times += self._subconverters[2].set_action(subactions[2], t)
return sorted(list(set(times)))
def i_sup(self, i_out):
# Docstring in base class
return sum([subconverter.i_sup([i_out_]) for subconverter, i_out_ in zip(self._subconverters, i_out)])
class ContB6BridgeConverter(ContDynamicallyAveragedConverter):
"""
The continuous B6 bridge converter (B6C) is simulated with three continuous 2QC.
Key:
'Cont-B6C'
Actions:
The Duty Cycle for each half bridge in the range of (-1,1)
Action Space:
Box(-1, 1, shape=(3,))
Output Voltages and Currents:
| voltages: Box(-1,1, shape=(3,))
| currents: Box(-1,1, shape=(3,))
Output Voltage Space:
Box(-0.5, 0.5, shape=(3,))
"""
action_space = Box(-1, 1, shape=(3,), dtype=np.float64)
    # Positive and negative voltages are possible
voltages = Box(-1, 1, shape=(3,), dtype=np.float64)
# Positive and negative currents are possible
currents = Box(-1, 1, shape=(3,), dtype=np.float64)
_reset_action = [0, 0, 0]
def __init__(self, tau=1e-4, **kwargs):
# Docstring in base class
super().__init__(tau=tau, **kwargs)
self._subconverters = [
ContTwoQuadrantConverter(tau=tau, **kwargs),
ContTwoQuadrantConverter(tau=tau, **kwargs),
ContTwoQuadrantConverter(tau=tau, **kwargs),
]
def reset(self):
# Docstring in base class
return [
self._subconverters[0].reset()[0] - 0.5,
self._subconverters[1].reset()[0] - 0.5,
self._subconverters[2].reset()[0] - 0.5,
]
def convert(self, i_out, t):
# Docstring in base class
u_out = [
self._subconverters[0].convert([i_out[0]], t)[0] - 0.5,
self._subconverters[1].convert([i_out[1]], t)[0] - 0.5,
self._subconverters[2].convert([i_out[2]], t)[0] - 0.5
]
return u_out
def set_action(self, action, t):
# Docstring in base class
times = []
times += self._subconverters[0].set_action([0.5 * (action[0] + 1)], t)
times += self._subconverters[1].set_action([0.5 * (action[1] + 1)], t)
times += self._subconverters[2].set_action([0.5 * (action[2] + 1)], t)
return sorted(list(set(times)))
def _convert(self, i_in, t):
# Not used
pass
def i_sup(self, i_out):
# Docstring in base class
return sum([subconverter.i_sup([i_out_]) for subconverter, i_out_ in zip(self._subconverters, i_out)])
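# Hedged usage sketch (illustrative, not part of the library): the averaged B6 bridge
# maps three duty cycles in [-1, 1] onto three phase voltages in [-0.5, 0.5].
#
#     conv = ContB6BridgeConverter(tau=1e-4)
#     conv.reset()                                     # -> [-0.5, -0.5, -0.5]
#     conv.set_action([0.4, -0.2, 0.0], t=0.0)
#     u_abc = conv.convert([1.0, -0.5, -0.5], t=0.0)   # -> [0.2, -0.1, 0.0] (zero interlocking time)
#     i_dc = conv.i_sup([1.0, -0.5, -0.5])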
|
the-stack_106_25058 | """ TASK 1. POSTAL CODE GENERATOR
takes 2 strings: "79-900" and "80-155" and returns the list of codes between them """
def post_code_generator(p_code1, p_code2):
p_code1 = int(p_code1.replace('-', ''))
p_code2 = int(p_code2.replace('-', ''))
return ["%02d-%03d" % divmod(x, 1000) for x in range(p_code1 + 1, p_code2)]
print(post_code_generator("79-900", "80-155"))
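# Quick illustrative self-check (added, not in the original script): the range between
# the two codes is exclusive on both ends, so 254 codes from "79-901" to "80-154" result.
codes = post_code_generator("79-900", "80-155")
assert codes[0] == "79-901" and codes[-1] == "80-154" and len(codes) == 254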
|
the-stack_106_25059 |
import json
import watson_developer_cloud
ASSISTANT_ID="<Enter your ASSISTANT_ID>"
assistant = watson_developer_cloud.AssistantV2(
iam_apikey='<Enter your API key>',
version='2018-11-08',
url='<Enter your assistant url that you set while creating the assistant>'# eg:'https://gateway-lon.watsonplatform.net/assistant/api' for london server
)
#creates a session with the api
session = assistant.create_session(ASSISTANT_ID).get_result()
print(json.dumps(session, indent=2))
#send your query to the api
#response variable recieves the json object with the response from api and the intent and extent detected
response = assistant.message(
assistant_id='ASSISTANT_ID',
session_id=session["session_id"],
input={
'message_type': 'text',
'text': 'When are you open'
}
).get_result()
print(json.dumps(response, indent=2))
#used to close the session
resp = assistant.delete_session(ASSISTANT_ID, session["session_id"]).get_result()
print(json.dumps(resp, indent=2)) |
the-stack_106_25060 | import csv
import operator
important_rank = 6
def process_opioid_table(list_table):
patient_id = 0
data = {}
for table_loc in list_table:
with open(table_loc, 'rb') as csvfile:
data_reader = csv.reader(csvfile, delimiter=',')
header = True
for row in data_reader:
if header:
header = False
continue
this_patient_data = {}
this_patient_data['death_date'] = row[0]
this_patient_data['death_time'] = row[1]
if row[2] == 'Accident':
manner = 0
else: # Accidents
manner = 1
this_patient_data['manner'] = manner
this_patient_data['age'] = row[3]
this_patient_data['gender'] = row[4]
this_patient_data['race'] = row[5]
this_patient_data['case_dispo'] = row[6]
# 7 ~ 13 (inclusive) 7 kinds of doses
this_patient_data['doses'] = []
for i in range(7, 14):
if row[i] != '':
this_patient_data['doses'].append(row[i])
# incident_zip 14
this_patient_data['incident_zip'] = row[14]
# decedent_zip 15
this_patient_data['decedent_zip'] = row[15]
# case year 16
this_patient_data['case_year'] = row[16]
# store this patient's info
data[patient_id] = this_patient_data
patient_id += 1
return data, patient_id
def sort_important_doses(num_rank, data_list):
all_kinds = {}
for data in data_list:
for patient_id in data:
doses = data[patient_id]['doses']
for d in doses:
if not d in all_kinds:
all_kinds[d] = 1
else:
all_kinds[d] += 1
sorted_all_kinds = sorted(all_kinds.items(), key=operator.itemgetter(1))[::-1]
selected = []
selected_num = 0
total_num = sum(all_kinds.values())
for i in range(num_rank):
d, num = sorted_all_kinds[i]
selected_num += num
selected.append(d)
percentage = float(selected_num) / float(total_num)
return all_kinds, selected, percentage
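# Illustrative toy example (added, not part of the original script): with
#     toy = {0: {'doses': ['Heroin', 'Fentanyl']}, 1: {'doses': ['Heroin']}}
#     all_kinds, selected, pct = sort_important_doses(1, [toy])
# 'Heroin' accounts for 2 of the 3 dose mentions, so selected == ['Heroin'] and pct == 2/3.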
if __name__ == "__main__":
    # combine the 2015 and 2016 tables
data, num_patients = process_opioid_table(['fatal_accidental_od_2015.csv','fatal_accidental_od_2016.csv'])
all_kinds, selected, percentage = sort_important_doses(important_rank, [data])
print(selected)
|
the-stack_106_25061 | import subprocess
from autonmt.api import NO_VENV_MSG
def cmd_spm_encode(model_path, input_file, output_file, venv_path=None):
print("\t- [INFO]: Using 'SentencePiece' from the command line.")
env = f"{venv_path}" if venv_path else NO_VENV_MSG
cmd = f"spm_encode --model={model_path} --output_format=piece < {input_file} > {output_file}" # --vocabulary={model_path}.vocab --vocabulary_threshold={min_vocab_frequency}
subprocess.call(['/bin/bash', '-c', f"{env} && {cmd}"])
return cmd
def cmd_spm_decode(model_path, input_file, output_file, venv_path=None):
print("\t- [INFO]: Using 'SentencePiece' from the command line.")
env = f"{venv_path}" if venv_path else NO_VENV_MSG
cmd = f"spm_decode --model={model_path} --input_format=piece < {input_file} > {output_file}"
subprocess.call(['/bin/bash', '-c', f"{env} && {cmd}"])
return cmd
def cmd_spm_train(input_file, model_prefix, subword_model, vocab_size, input_sentence_size, character_coverage, byte_fallback, venv_path=None):
print("\t- [INFO]: Using 'SentencePiece' from the command line.")
# Add extra options
extra = ""
extra += " --byte_fallback" if byte_fallback else ""
extra += f" --character_coverage={character_coverage}" if character_coverage else ""
# Numbers are not included in the vocabulary (...and digits are not split, even with: --split_digits)
# https://github.com/google/sentencepiece/blob/master/doc/options.md
env = f"{venv_path}" if venv_path else NO_VENV_MSG
cmd = f"spm_train --input={input_file} --model_prefix={model_prefix} --vocab_size={vocab_size} --model_type={subword_model} --input_sentence_size={input_sentence_size} --pad_id=3 {extra}"
subprocess.call(['/bin/bash', '-c', f"{env} && {cmd}"])
return cmd
def cmd_moses_tokenizer(input_file, output_file, lang, venv_path=None):
print("\t- [INFO]: Using 'Sacremoses' from the command line.")
env = f"{venv_path}" if venv_path else NO_VENV_MSG
cmd = f"sacremoses -l {lang} -j$(nproc) tokenize < {input_file} > {output_file}"
subprocess.call(['/bin/bash', '-c', f"{env} && {cmd}"])
return cmd
def cmd_moses_detokenizer(input_file, output_file, lang, venv_path=None):
print("\t- [INFO]: Using 'Sacremoses' from the command line.")
env = f"{venv_path}" if venv_path else NO_VENV_MSG
cmd = f"sacremoses -l {lang} -j$(nproc) detokenize < {input_file} > {output_file}"
subprocess.call(['/bin/bash', '-c', f"{env} && {cmd}"])
return cmd
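# Hedged end-to-end sketch (illustrative; every path and the virtualenv below are
# assumptions, not part of this module):
#
#     venv = "source /path/to/venv/bin/activate"
#     cmd_spm_train(input_file="data/train.en", model_prefix="spm/en", subword_model="unigram",
#                   vocab_size=16000, input_sentence_size=1000000, character_coverage=1.0,
#                   byte_fallback=False, venv_path=venv)
#     cmd_spm_encode(model_path="spm/en.model", input_file="data/train.en",
#                    output_file="data/train.tok.en", venv_path=venv)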
|
the-stack_106_25062 | from .db import get_connection, get_data
from flask import render_template, url_for
def do_db_info():
context = {'db': True}
conn = get_connection()
cur = conn.cursor()
column_names, rows = get_data(cur, "SELECT * FROM PRINTERS")
context['printers_cols'] = column_names
context['printers_rows'] = rows
column_names, rows = get_data(cur, "SELECT * FROM PRINTER_QUEUE ORDER BY PRINTER_ID, PRIORITY")
context['printer_queue_cols'] = column_names
context['printer_queue_rows'] = rows
column_names, rows = get_data(cur, "SELECT * FROM PRINTER_QUEUE WHERE PRINTER_ID IS NULL")
context['unassigned_queue_cols'] = column_names
context['unassigned_queue_rows'] = rows
return render_template('db.html', **context)
|
the-stack_106_25069 | import pandas as pd
import numpy as np
from RetailChurnTemplateUtility import *
def azureml_main(df1=None, df2=None):
key_column = 'UserId'
IsDevelopment = True
# Feature Engineering
churnUtil = RetailChurnTemplateUtility()
# Return value must be of a sequence of pandas.DataFrame
return churnUtil.calculateAverages(df1,
df2,
key_column,
uniquable_columns=df2.columns,
summable_columns=df1.columns)
|
the-stack_106_25070 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementation of the NeuTra Kernel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import dtype_util
tfb = tfp.bijectors
tfd = tfp.distributions
__all__ = ['NeuTra', 'make_iaf_stack']
def make_iaf_stack(total_event_size,
num_hidden_layers=2,
seed=None,
dtype=tf.float32):
"""Creates an stacked IAF bijector.
This bijector operates on vector-valued events.
Args:
total_event_size: Number of dimensions to operate over.
num_hidden_layers: How many hidden layers to use in each IAF.
seed: Random seed for the initializers.
dtype: DType for the variables.
Returns:
bijector: The created bijector.
"""
seed = tfp.util.SeedStream(seed, 'make_iaf_stack')
def make_iaf():
"""Create an IAF."""
initializer = tf.compat.v2.keras.initializers.VarianceScaling(
2 * 0.01, seed=seed() % (2**31 - 1))
made = tfb.AutoregressiveLayer(
params=2,
event_shape=[total_event_size],
hidden_units=[total_event_size] * num_hidden_layers,
activation=tf.nn.elu,
kernel_initializer=initializer,
dtype=dtype)
def shift_and_scale(x):
# TODO(siege): Something is losing the static shape.
x.set_shape(
x.shape.merge_with([None] * (x.shape.ndims - 1) + [total_event_size]))
return tf.unstack(made(x), num=2, axis=-1)
return tfb.Invert(tfb.MaskedAutoregressiveFlow(shift_and_scale))
def make_swap():
"""Create an swap."""
permutation = list(reversed(range(total_event_size)))
return tfb.Permute(permutation)
bijector = make_iaf()
bijector = make_swap()(bijector)
bijector = make_iaf()(bijector)
bijector = make_swap()(bijector)
bijector = make_iaf()(bijector)
bijector = make_swap()(bijector)
return bijector
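# Hedged sanity-check sketch (illustrative, not part of the library): the returned
# bijector acts on vectors of length `total_event_size` and should round-trip them.
#
#     iaf = make_iaf_stack(total_event_size=4, seed=42)
#     x = tf.random.normal([8, 4])
#     x_roundtrip = iaf.inverse(iaf.forward(x))   # ~= x up to numerical error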
class NeuTra(tfp.mcmc.TransitionKernel):
"""The experimental NeuTra kernel.
Warning: This kernel is experimental. Default arguments and their
  interpretations will very likely change.
This kernel does not work in TF1 graph mode.
  This transition kernel implements the NeuTra MCMC scheme
[(Hoffman et al., 2019)][1]. It operates by learning a neural pre-conditioner
(bijector) inside the `bootstrap_results` method, and then using it inside
`one_step` to accelerate HMC. The same bijector is used to initialize the
state: i.e. the `current_state` as passed to `tfp.mcmc.sample_chain` is
ignored except to extract the number of chains to sample in parallel.
This kernel performs step-size adaptation and picks an automatic trajectory
length for its internal HMC kernel.
If your problem has constrained support, specify this support via the
`unconstraining_bijector` argument. This argument is interpreted such that the
image of the forward transformation of that bijector matches the support of
your problem. E.g. if one of your random variables is positive, you could use
the `tfb.Softplus` bijector.
Since this is still experimental, we provide some facilities to debug the
bijector training via the `train_debug_fn`, where you can monitor the training
progress. In practice, it may be prudent to run the training process multiple
times to verify that the variational approximation is stable. If it is not,
you can attempt to increase the expressiveness of the bijector via the
`trainable_bijector_fn` argument. Currently the bijector operates by
flattening the entire problem state into one vector. This is seamless to the
user, but is important to remember when designing a bijector. The flattening
will likely be removed in the future.
`_flattened_variational_distribution` returns the (flattened) distribution
resulting from a standard normal being pushed through the bijector. This is
useful for debugging, as this can be compared to the MCMC chain and your prior
preconception of what the target distribution should look like.
Additionally, you can examine the `inner_results.transformed_state` inside the
kernel results. By construction, for a well-fitted bijector this should
resemble a standard normal. This kernel assumes that you have a well-fitted
bijector, so if samples in the transformed space do not look normal, then this
kernel is not operating efficiently.
### Examples
Sampling from a multivariate log-normal distribution.
```python
target_dist = tfd.MultivariateNormalTriL(scale_tril=[[1., 0.], [2, 1.]])
target_dist = tfb.Exp()(target_dist)
num_chains = 64
state_shape = 2
num_burnin_steps = 1000
kernel = NeuTra(
target_log_prob_fn=target_dist.log_prob,
state_shape=state_shape,
num_step_size_adaptation_steps=int(0.8 * num_burnin_steps),
unconstraining_bijector=tfb.Exp())
chain = tfp.mcmc.sample_chain(
num_results=1000,
num_burnin_steps=1000,
current_state=tf.ones([num_chains, 2]),
kernel=kernel,
trace_fn=None)
```
#### References
[1]: Hoffman, M., Sountsov, P., Dillon, J. V., Langmore, I., Tran, D., &
Vasudevan, S. (2019). NeuTra-lizing Bad Geometry in Hamiltonian Monte
Carlo Using Neural Transport. http://arxiv.org/abs/1903.03704
"""
def __init__(self,
target_log_prob_fn,
state_shape,
num_step_size_adaptation_steps,
unconstraining_bijector=None,
trainable_bijector_fn=make_iaf_stack,
learning_rate=1e-2,
train_batch_size=4096,
num_train_steps=5000,
train_debug_fn=None,
seed=None):
"""Creates the kernel.
Args:
target_log_prob_fn: Python callable which takes an argument like
`current_state` (or `*current_state` if it's a list) and returns its
(possibly unnormalized) log-density under the target distribution.
state_shape: A Python list, or list of Python lists (or `TensorShape`s)
that describes the shape of the problem state. Must be the same
structure as `current_state`. All shapes must be fully defined.
num_step_size_adaptation_steps: Number of steps to use for step size
adaptation. See `tfp.mcmc.SimpleStepSizeAdaptation`.
unconstraining_bijector: A bijector or list of bijectors that go from
unconstrained space to the support of the corresponding random variable.
Must be the same structure as `current_state`.
trainable_bijector_fn: Creates a trainable, vector-valued event bijector.
Must be a callable with signature: `(total_event_size, seed) ->
bijector`, where `total_event_size` is the size of the event.
learning_rate: Base learning rate to use for training the bijector.
Internally, learning rate decay is used to stabilize learning.
train_batch_size: Batch size to use for training the bijector.
num_train_steps: Number of training steps to train the bijector.
train_debug_fn: A callable with signature `(NeuTra, step, loss)` called
for every training step. The first argument is this instance, and `step`
is the current training step.
seed: A seed for reproducibility.
"""
self._parameters = dict(
target_log_prob_fn=target_log_prob_fn,
state_shape=state_shape,
num_step_size_adaptation_steps=num_step_size_adaptation_steps,
unconstraining_bijector=unconstraining_bijector,
trainable_bijector_fn=trainable_bijector_fn,
learning_rate=learning_rate,
train_batch_size=train_batch_size,
num_train_steps=num_train_steps,
train_debug_fn=train_debug_fn,
seed=seed)
self._state_shape = tf.nest.map_structure(tf.TensorShape, state_shape)
if unconstraining_bijector is None:
unconstraining_bijector = tf.nest.map_structure(lambda _: tfb.Identity(),
self.state_shape)
self._unconstraining_bijector = unconstraining_bijector
def _make_reshaped_bijector(b, s):
return tfb.Reshape(
event_shape_in=s, event_shape_out=[s.num_elements()])(b)(
tfb.Reshape(event_shape_out=b.inverse_event_shape(s)))
# This converts the `unconstraining_bijector` to work on flattened state
# parts.
reshaped_bijector = tf.nest.map_structure(_make_reshaped_bijector,
unconstraining_bijector,
self.state_shape)
blockwise_bijector = tfb.Blockwise(
bijectors=tf.nest.flatten(reshaped_bijector),
block_sizes=tf.nest.flatten(
tf.nest.map_structure(lambda s: s.num_elements(),
self.state_shape)))
dtype = dtype_util.common_dtype([learning_rate], dtype_hint=tf.float32)
self._dtype = dtype
trainable_bijector = trainable_bijector_fn(
self._total_event_size, seed=seed, dtype=dtype)
self._trainable_bijector = trainable_bijector
self._bijector = blockwise_bijector(trainable_bijector)
def flattened_target_log_prob(flat_state):
state = self._unflatten_state(flat_state)
if isinstance(state, (list, tuple)):
return target_log_prob_fn(*state)
else:
return target_log_prob_fn(state)
self._flattened_target_log_prob_val = flattened_target_log_prob
kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=flattened_target_log_prob,
step_size=1.,
num_leapfrog_steps=self._num_leapfrog_steps(1.),
seed=seed if not tf.executing_eagerly() else None)
kernel = tfp.mcmc.TransformedTransitionKernel(kernel, self._bijector)
kernel = tfp.mcmc.SimpleStepSizeAdaptation(
kernel,
num_adaptation_steps=num_step_size_adaptation_steps,
target_accept_prob=np.array(0.9, dtype.as_numpy_dtype()))
self._kernel = kernel
@property
def trainable_bijector(self):
return self._trainable_bijector
@property
def unconstraining_bijector(self):
return self._unconstraining_bijector
@property
def state_shape(self):
return self._state_shape
@property
def train_batch_size(self):
return self._parameters['train_batch_size']
@property
def num_train_steps(self):
return self._parameters['num_train_steps']
@property
def learning_rate(self):
return self._parameters['learning_rate']
@property
def train_debug_fn(self):
return self._parameters['train_debug_fn']
@property
def seed(self):
return self._parameters['seed']
@property
def _total_event_size(self):
return sum(
tf.nest.flatten(
tf.nest.map_structure(
lambda b, s: b.inverse_event_shape(s).num_elements(),
self.unconstraining_bijector, self.state_shape)))
def _num_leapfrog_steps(self, step_size):
step_size = tf.convert_to_tensor(value=step_size)
trajectory_length = np.float32(self._total_event_size)**0.25
return tf.cast(tf.math.ceil(trajectory_length / step_size), dtype=tf.int32)
def _flatten_state(self, state):
state_parts = tf.nest.flatten(state)
flat_state_shapes = tf.nest.flatten(self.state_shape)
batch_shape = tf.shape(input=state_parts[0])[:-flat_state_shapes[0].ndims]
flat_shape = tf.concat([batch_shape, [-1]], -1)
flat_state_parts = tf.nest.map_structure(
lambda s: tf.reshape(s, flat_shape), state_parts)
return tf.concat(flat_state_parts, -1)
def _unflatten_state(self, flat_state):
state_parts = tf.split(
flat_state,
[s.num_elements() for s in tf.nest.flatten(self.state_shape)],
axis=-1)
batch_shape = tf.shape(input=flat_state)[:-1]
state = tf.nest.pack_sequence_as(self.state_shape, state_parts)
return tf.nest.map_structure(
lambda part, s: tf.reshape(part, tf.concat([batch_shape, s], 0)), state,
self.state_shape)
def is_calibrated(self):
return True
def _flattened_variational_distribution(self):
base = tfd.MultivariateNormalDiag(
loc=tf.zeros(self._total_event_size, dtype=self._dtype))
return self._bijector(base)
@property
def _flattened_target_log_prob(self):
return self._flattened_target_log_prob_val
@tf.function(autograph=False)
def one_step(self, current_state, previous_kernel_results):
"""Runs one iteration of NeuTra.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
previous_kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from previous calls to this function (or from the
`bootstrap_results` function.)
Returns:
next_state: Tensor or Python list of `Tensor`s representing the state(s)
of the Markov chain(s) after taking exactly one step. Has same type and
shape as `current_state`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
"""
@tfp.mcmc.internal.util.make_innermost_setter
def set_num_leapfrog_steps(kernel_results, num_leapfrog_steps):
return kernel_results._replace(
accepted_results=kernel_results.accepted_results._replace(
num_leapfrog_steps=num_leapfrog_steps))
step_size = previous_kernel_results.new_step_size
previous_kernel_results = set_num_leapfrog_steps(
previous_kernel_results, self._num_leapfrog_steps(step_size))
new_state, kernel_results = self._kernel.one_step(
self._flatten_state(current_state), previous_kernel_results)
return self._unflatten_state(new_state), kernel_results
def bootstrap_results(self, state):
"""Trains the bijector and creates initial `previous_kernel_results`.
The supplied `state` is only used to determine the number of chains to run
in parallel_iterations
Args:
state: `Tensor` or Python `list` of `Tensor`s representing the initial
state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*state))`.
Returns:
kernel_results: Instance of
`UncalibratedHamiltonianMonteCarloKernelResults` inside
`MetropolisHastingsResults` inside `TransformedTransitionKernelResults`
inside `SimpleStepSizeAdaptationResults`.
"""
def loss():
q = self._flattened_variational_distribution()
# TODO(siege): How to seed this?
samples = q.sample(self.train_batch_size)
return tf.reduce_mean(
input_tensor=q.log_prob(samples) -
self._flattened_target_log_prob(samples),
axis=-1)
lr = tf.convert_to_tensor(value=self.learning_rate, dtype=self._dtype)
dtype = lr.dtype
learning_rate = tf.compat.v2.optimizers.schedules.PiecewiseConstantDecay(
list(self.num_train_steps *
np.array([0.2, 0.8]).astype(dtype.as_numpy_dtype())),
[lr, lr * 0.1, lr * 0.01])
opt = tf.compat.v2.optimizers.Adam(learning_rate)
@tf.function(autograph=False)
def train_step():
with tf.GradientTape() as tape:
loss_val = loss()
vals = tape.watched_variables()
grads = tape.gradient(loss_val, vals)
grads_and_vals = list(zip(grads, vals))
opt.apply_gradients(grads_and_vals)
return loss_val
for step in range(self.num_train_steps):
loss_val = train_step()
tf.debugging.assert_all_finite(
loss_val, 'NeuTra loss is NaN at step {}'.format(step))
if self.train_debug_fn:
# pylint: disable=not-callable
self.train_debug_fn(self, step, loss_val)
state_parts = tf.nest.flatten(state)
flat_state_shapes = tf.nest.flatten(self.state_shape)
batch_shape = tf.shape(input=state_parts[0])[:-flat_state_shapes[0].ndims]
return self._kernel.bootstrap_results(
self._flattened_variational_distribution().sample(
batch_shape, seed=self.seed))
|
the-stack_106_25071 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
class QuotasAdminNegativeTestBase(base.BaseV2ComputeAdminTest):
force_tenant_isolation = True
@classmethod
def setup_clients(cls):
super(QuotasAdminNegativeTestBase, cls).setup_clients()
cls.client = cls.os_primary.quotas_client
cls.adm_client = cls.os_admin.quotas_client
cls.sg_client = cls.security_groups_client
cls.sgr_client = cls.security_group_rules_client
@classmethod
def resource_setup(cls):
super(QuotasAdminNegativeTestBase, cls).resource_setup()
# NOTE(afazekas): these test cases should always create and use a new
# tenant most of them should be skipped if we can't do that
cls.demo_tenant_id = cls.client.tenant_id
def _update_quota(self, quota_item, quota_value):
quota_set = (self.adm_client.show_quota_set(self.demo_tenant_id)
['quota_set'])
default_quota_value = quota_set[quota_item]
self.adm_client.update_quota_set(self.demo_tenant_id,
force=True,
**{quota_item: quota_value})
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
**{quota_item: default_quota_value})
class QuotasAdminNegativeTest(QuotasAdminNegativeTestBase):
"""Negative tests of nova quotas"""
@decorators.attr(type=['negative'])
@decorators.idempotent_id('733abfe8-166e-47bb-8363-23dbd7ff3476')
def test_update_quota_normal_user(self):
"""Test updating nova quota by normal user should fail"""
self.assertRaises(lib_exc.Forbidden,
self.client.update_quota_set,
self.demo_tenant_id,
ram=0)
# TODO(afazekas): Add dedicated tenant to the skipped quota tests.
# It can be moved into the setUpClass as well.
@decorators.attr(type=['negative'])
@decorators.idempotent_id('91058876-9947-4807-9f22-f6eb17140d9b')
def test_create_server_when_cpu_quota_is_full(self):
"""Disallow server creation when tenant's vcpu quota is full"""
self._update_quota('cores', 0)
self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
self.create_test_server)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('6fdd7012-584d-4327-a61c-49122e0d5864')
def test_create_server_when_memory_quota_is_full(self):
"""Disallow server creation when tenant's memory quota is full"""
self._update_quota('ram', 0)
self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
self.create_test_server)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7c6be468-0274-449a-81c3-ac1c32ee0161')
def test_create_server_when_instances_quota_is_full(self):
"""Once instances quota limit is reached, disallow server creation"""
self._update_quota('instances', 0)
self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
self.create_test_server)
class QuotasSecurityGroupAdminNegativeTest(QuotasAdminNegativeTestBase):
"""Negative tests of nova security group quota"""
max_microversion = '2.35'
@decorators.skip_because(bug="1186354",
condition=CONF.service_available.neutron)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7c6c8f3b-2bf6-4918-b240-57b136a66aa0')
@utils.services('network')
def test_security_groups_exceed_limit(self):
"""Negative test: Creation Security Groups over limit should FAIL"""
# Set the quota to number of used security groups
sg_quota = self.limits_client.show_limits()['limits']['absolute'][
'totalSecurityGroupsUsed']
self._update_quota('security_groups', sg_quota)
# Check we cannot create anymore
# A 403 Forbidden or 413 Overlimit (old behaviour) exception
# will be raised when out of quota
self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
self.sg_client.create_security_group,
name="sg-overlimit", description="sg-desc")
@decorators.skip_because(bug="1186354",
condition=CONF.service_available.neutron)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('6e9f436d-f1ed-4f8e-a493-7275dfaa4b4d')
@utils.services('network')
def test_security_groups_rules_exceed_limit(self):
"""Negative test: Creation of Security Group Rules should FAIL"""
# when we reach limit maxSecurityGroupRules
self._update_quota('security_group_rules', 0)
s_name = data_utils.rand_name('securitygroup')
s_description = data_utils.rand_name('description')
securitygroup = self.sg_client.create_security_group(
name=s_name, description=s_description)['security_group']
self.addCleanup(self.sg_client.delete_security_group,
securitygroup['id'])
secgroup_id = securitygroup['id']
ip_protocol = 'tcp'
# Check we cannot create SG rule anymore
# A 403 Forbidden or 413 Overlimit (old behaviour) exception
# will be raised when out of quota
self.assertRaises((lib_exc.OverLimit, lib_exc.Forbidden),
self.sgr_client.create_security_group_rule,
parent_group_id=secgroup_id, ip_protocol=ip_protocol,
from_port=1025, to_port=1025)
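# Illustrative sketch (added, not from the original module): _update_quota above follows
# the usual tempest pattern of saving the default value, forcing the new quota, and
# registering a cleanup that restores the default afterwards, roughly:
#
#   default = self.adm_client.show_quota_set(self.demo_tenant_id)['quota_set']['cores']
#   self.adm_client.update_quota_set(self.demo_tenant_id, force=True, cores=0)
#   self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id, cores=default)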
|
the-stack_106_25074 | import components
def NumDumbNodesTest (size):
fws = ['f_%d'%(f) for f in xrange(0, size)]
    # Start with 2 end hosts
end_hosts = ['e_%d'%(e) for e in xrange(2)]
all_nodes = []
all_nodes.extend(end_hosts)
all_nodes.extend(fws)
addresses = ['ip_%s'%(n) for n in all_nodes]
ctx = components.Context(all_nodes, addresses)
net = components.Network(ctx)
end_hosts = [components.EndHost(getattr(ctx, e), net, ctx) for e in end_hosts]
firewalls = [components.NullNode(getattr(ctx, f), net, ctx) for f in fws]
[e0, e1] = end_hosts
all_node_objects = []
all_node_objects.extend(end_hosts)
all_node_objects.extend(firewalls)
addresses = [getattr(ctx, ad) for ad in addresses]
address_mappings = [(ob, ad) for (ob, ad) in zip(all_node_objects, addresses)]
net.setAddressMappings(address_mappings)
# This is a test that can be used for both positive and negative testing; one pair
    # of endhosts is allowed to send, the other isn't
"""Topology
f0
/ \
e0 e1"""
routing_table = [(ctx.ip_e_0, e0), \
(ctx.ip_e_1, e1)]
net.RoutingTable(firewalls[0], routing_table)
for e in end_hosts:
net.SetGateway(e, firewalls[0])
net.Attach(*all_node_objects)
node_dict = dict(zip(all_nodes, all_node_objects))
class NumDumbResult (object):
def __init__ (self, net, ctx, **nodes):
self.net = net
self.ctx = ctx
for k, v in nodes.iteritems():
setattr(self, k, v)
self.check = components.PropertyChecker (ctx, net)
return NumDumbResult (net, ctx, **node_dict)
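# Hedged usage sketch (added, not part of the original file): NumDumbNodesTest returns a
# NumDumbResult whose attributes mirror the node names plus the network, context and
# property checker built above.
if __name__ == '__main__':
    result = NumDumbNodesTest(1)  # e_0 -- f_0 -- e_1 with a single NullNode "firewall"
    print(result.ctx)    # components.Context over all nodes and addresses
    print(result.net)    # components.Network with the routing table set on f_0
    print(result.check)  # components.PropertyChecker(ctx, net) for property queries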
|
the-stack_106_25075 | from __future__ import annotations
import sys
import os
sys.path.append(os.getcwd())
from typing import List, Dict, Tuple
import math
import joblib
import numpy as np
import bisect
import random
import copy
from src.read_problem_data import ProblemData
from src.util.print import bcolors
int_inf = 9999
class Node:
id: str
tw_start: int = -1
tw_end: int = math.inf
demand: List[int] = []
zone: str = "na"
def __init__(self, name, is_factory=False):
self.id: str = name
self.is_factory: bool = is_factory
def __repr__(self):
return f"{self.id}: ({self.tw_start}, {self.tw_end}), {self.demand}, (zone {self.zone})"
class ProblemDataExtended(ProblemData):
def __init__(self, file_path: str) -> None:
super().__init__(file_path)
self._init_nodes()
self._init_quay_capacities()
self.max_transport_cost = {vessel: max(self.transport_unit_costs[v] * t
for (v, i, j), t in self.transport_times.items() if v == vessel)
for vessel in self.vessels}
self.no_time_periods = len(self.time_periods)
self.exernal_depot_ids = self._get_external_depots()
self.init_inventory_lists = {factory: [self.factory_initial_inventories[factory, p] for p in self.products]
for factory in self.factory_nodes}
def _init_nodes(self) -> None:
f_nodes = self.factory_nodes[:]
o_nodes = self.order_nodes[:]
self.factory_nodes: Dict[str, Node] = {}
self.order_nodes: Dict[str, Node] = {}
for i in f_nodes:
node = Node(i, is_factory=True)
node.tw_start = 1
node.tw_end = len(self.time_periods) - 1
self.factory_nodes[i] = node
for i in o_nodes:
node = Node(i)
node.tw_start = self.tw_start[i]
node.tw_end = self.tw_end[i]
node.demand = [self.demands[i, i, p] for p in self.products]
self.order_nodes[i] = node
for zone in self.orders_for_zones.keys():
for order_node in self.orders_for_zones[zone]:
self.order_nodes[order_node].zone = zone
self.nodes: Dict[str, Node] = {**self.factory_nodes, **self.order_nodes}
def _init_quay_capacities(self) -> None:
quay_capacity = {factory: [cap for (f, t), cap in self.factory_max_vessels_loading.items() if f == factory]
for factory in self.factory_nodes}
quay_cap_incr_times = {factory: [t for t in range(1, len(quay_capacity[factory]))
if quay_capacity[factory][t - 1] < quay_capacity[factory][t]]
for factory in self.factory_nodes}
quay_cap_decr_times = {factory: [t for t in range(1, len(quay_capacity[factory]))
if quay_capacity[factory][t - 1] > quay_capacity[factory][t]]
for factory in self.factory_nodes}
self.quay_capacity: Dict[str, List[int]] = quay_capacity
self.quay_cap_incr_times: Dict[str, List[int]] = quay_cap_incr_times
self.quay_cap_decr_times: Dict[str, List[int]] = quay_cap_decr_times
def _get_external_depots(self) -> List[str]:
return [factory for factory in self.factory_nodes
if sum(self.production_max_capacities[p_line, product]
for f, p_line in self.production_lines_for_factories if f == factory
for product in self.products)
== 0]
def get_factory_sub_problem(self, factory: str, orders: List[str], vessels: List[str]) -> ProblemDataExtended:
new_prbl = copy.copy(self)
new_prbl.nodes = [factory] + orders
new_prbl.factory_nodes = [factory]
new_prbl.order_nodes = orders
new_prbl.products = self.products
new_prbl.vessels = vessels
new_prbl.orders_for_zones = {zone: [order for order in o_list if order in orders]
for zone, o_list in self.orders_for_zones.items()}
new_prbl.zones = self.zones
new_prbl.nodes_for_vessels = {(vessel, node): val
for (vessel, node), val in self.nodes_for_vessels.items()
if node in new_prbl.nodes and vessel in vessels}
new_prbl.time_periods = self.time_periods
new_prbl.start_times_for_vessels = {vessel: t for vessel, t in self.start_times_for_vessels.items()
if vessel in vessels}
new_prbl.vessel_initial_locations = {vessel: loc for vessel, loc in self.vessel_initial_locations.items()
if vessel in vessels}
new_prbl.time_windows_for_orders = {(order, t): v
for (order, t), v in self.time_windows_for_orders.items()
if order in orders}
new_prbl.tw_start = {i: self.tw_start[i] for i in orders}
new_prbl.tw_end = {i: self.tw_end[i] for i in orders}
new_prbl.vessel_ton_capacities = {vessel: cap for vessel, cap in self.vessel_ton_capacities.items()
if vessel in vessels}
new_prbl.vessel_nprod_capacities = {vessel: cap
for vessel, cap in self.vessel_nprod_capacities.items()
if vessel in vessels}
new_prbl.factory_inventory_capacities = {f: cap
for f, cap in self.factory_inventory_capacities.items()
if f == factory}
print(self.factory_inventory_capacities.items())
print(new_prbl.factory_inventory_capacities)
new_prbl.factory_initial_inventories = {(f, p): inv
for (f, p), inv in self.factory_initial_inventories.items()
if f == factory}
new_prbl.inventory_unit_costs = {f: cost for f, cost in self.inventory_unit_costs.items()
if f == factory}
new_prbl.loading_unloading_times = {(vessel, node): cost
for (vessel, node), cost in self.loading_unloading_times.items()
if vessel in vessels and node in new_prbl.nodes}
new_prbl.transport_unit_costs = {vessel: cost for vessel, cost in self.transport_unit_costs.items()
if vessel in vessels}
new_prbl.transport_times = {(v, i, j): t for (v, i, j), t in self.transport_times.items()
if v in vessels
and i in new_prbl.nodes + ['d_0', 'd_-1']
and j in new_prbl.nodes + ['d_0', 'd_-1']}
new_prbl.transport_times_exact = {(v, i, j): t for (v, i, j), t in self.transport_times.items()
if v in vessels
and i in new_prbl.nodes + ['d_0', 'd_-1']
and j in new_prbl.nodes + ['d_0', 'd_-1']}
# new_prbl.arcs_for_vessels = self.generate_arcs_for_vessels() # Skipped due to restricted time available
new_prbl.demands = {(i, j, p): d for (i, j, p), d in self.demands.items()
if i in new_prbl.nodes and j in new_prbl.nodes}
new_prbl.production_stops = {(f, t): stop
for (f, t), stop in self.production_stops.items()
if f == factory}
new_prbl.production_start_costs = {(f, p): cost
for (f, p), cost in self.production_start_costs.items()
if f == factory}
new_prbl.production_lines_for_factories = [(f, l) for f, l in self.production_lines_for_factories
if f == factory]
new_prbl.production_lines = [l for f, l in new_prbl.production_lines_for_factories]
new_prbl.production_max_capacities = {(l, p): cap
for (l, p), cap in self.production_max_capacities.items()
if l in new_prbl.production_lines}
new_prbl.production_line_min_times = {(l, p): min_t
for (l, p), min_t in self.production_line_min_times.items()
if l in new_prbl.production_lines}
new_prbl.product_groups = self.product_groups
new_prbl.factory_max_vessels_destination = {f: max_v
for f, max_v in
self.factory_max_vessels_destination.items()
if f == factory}
new_prbl.factory_max_vessels_loading = {(f, t): max_v
for (f, t), max_v in
self.factory_max_vessels_loading.items()
if f == factory}
new_prbl.min_wait_if_sick = {(v, i, j): t for (v, i, j), t in self.min_wait_if_sick.items()
if v in vessels and i in new_prbl.nodes and j in new_prbl.nodes}
new_prbl.min_wait_if_sick_abs = self.min_wait_if_sick_abs
new_prbl.external_delivery_penalties = {order: penalty
for (order, penalty) in
self.external_delivery_penalties.items()
if order in orders}
new_prbl.no_time_periods = len(self.time_periods)
new_prbl._init_nodes()
return new_prbl
class Solution:
def __init__(self, prbl: ProblemDataExtended, verbose: bool = False, debug: bool = False) -> None:
self.prbl = prbl
self.debug = debug
self.routes: Dict[str, List[str]] = {v: [self.prbl.vessel_initial_locations[v]] for v in self.prbl.vessels}
self.e: Dict[str, List[int]] = {v: [max(1, self.prbl.start_times_for_vessels[v] + 1)] for v in
self.prbl.vessels}
self.l: Dict[str, List[int]] = {v: [len(self.prbl.time_periods) - 1] for v in self.prbl.vessels}
self.factory_visits: Dict[str, List[str]] = self._init_factory_visits()
self.factory_visits_route_index: Dict[str, List[int]] = {f: [0 for _ in self.factory_visits[f]]
for f in self.prbl.factory_nodes}
self.temp_routes: Dict[str, List[str]] = {vessel: route[:] for vessel, route in self.routes.items()}
self.temp_e: Dict[str, List[int]] = {vessel: e[:] for vessel, e in self.e.items()}
self.temp_l: Dict[str, List[int]] = {vessel: l[:] for vessel, l in self.l.items()}
self.temp_factory_visits: Dict[str, List[str]] = {factory: visits[:]
for factory, visits in self.factory_visits.items()}
self.temp_factory_visits_route_index: Dict[str, List[int]] = {factory: visit_route_idxs[:]
for factory, visit_route_idxs in
self.factory_visits_route_index.items()}
self.ppfc_slack_factor: float = 1.0
self.verbose = verbose
def __repr__(self) -> str:
return f"Routes: {self.routes}"
def copy(self) -> Solution:
solution_copy = Solution(self.prbl) # problem data is static
solution_copy.routes = {vessel: route[:] for vessel, route in self.routes.items()}
solution_copy.e = {vessel: e[:] for vessel, e in self.e.items()}
solution_copy.l = {vessel: l[:] for vessel, l in self.l.items()}
solution_copy.factory_visits = {factory: visits[:] for factory, visits in self.factory_visits.items()}
solution_copy.factory_visits_route_index = {factory: visit_route_idxs[:]
for factory, visit_route_idxs in
self.factory_visits_route_index.items()}
solution_copy.temp_routes = {vessel: route[:] for vessel, route in self.temp_routes.items()}
solution_copy.temp_e = {vessel: e[:] for vessel, e in self.temp_e.items()}
solution_copy.temp_l = {vessel: l[:] for vessel, l in self.temp_l.items()}
solution_copy.temp_factory_visits = {factory: visits[:] for factory, visits in self.temp_factory_visits.items()}
solution_copy.temp_factory_visits_route_index = {factory: visit_route_idxs[:]
for factory, visit_route_idxs in
self.temp_factory_visits_route_index.items()}
solution_copy.ppfc_slack_factor = self.ppfc_slack_factor
solution_copy.verbose = self.verbose
return solution_copy
def insert_last_checked(self):
self.routes = {vessel: route[:] for vessel, route in self.temp_routes.items()}
self.e = {vessel: e[:] for vessel, e in self.temp_e.items()}
self.l = {vessel: l[:] for vessel, l in self.temp_l.items()}
self.factory_visits = {factory: visits[:] for factory, visits in self.temp_factory_visits.items()}
self.factory_visits_route_index = {factory: visit_route_idxs[:]
for factory, visit_route_idxs in
self.temp_factory_visits_route_index.items()}
def clear_last_checked(self):
self.temp_routes = {vessel: route[:] for vessel, route in self.routes.items()}
self.temp_e = {vessel: e[:] for vessel, e in self.e.items()}
self.temp_l = {vessel: l[:] for vessel, l in self.l.items()}
self.temp_factory_visits = {factory: visits[:] for factory, visits in self.factory_visits.items()}
self.temp_factory_visits_route_index = {factory: visit_route_idxs[:]
for factory, visit_route_idxs in
self.factory_visits_route_index.items()}
def check_insertion_feasibility(self, node_id: str, vessel: str, idx: int,
noise_factor: float = 0.0, ppfc: bool = False) -> bool:
node = self.prbl.nodes[node_id]
idx = len(self.temp_routes[vessel]) if idx > len(self.temp_routes[vessel]) else idx
# Checks that do NOT assume node is inserted in temp:
if not self.check_node_for_vessel_feasibility(node, vessel):
return False
if not self.check_load_feasibility(node, vessel, idx):
return False
if not self.check_no_products_feasibility(node, vessel, idx):
return False
if not self.check_external_depot_feasibility(node_id, vessel, idx):
return False
if not self.check_time_feasibility(node_id, vessel, idx, noise_factor):
return False
# Checks that do assume that node is inserted in temp:
if not self.check_final_factory_destination_feasibility(vessel, idx):
if self.verbose:
print(f"check_final_factory_destination_feasibility failed for {vessel}, {node.id} inserted at {idx}")
return False
        if ppfc and not self.check_production_feasibility(vessel, idx)[0]:  # returns (feasible, factory_id)
return False
return True
def get_earliest(self, idx: int, vessel: str) -> int:
route = self.temp_routes[vessel]
node = self.prbl.nodes[route[idx]]
prev_node_id = route[idx - 1] if idx > 0 else None
prev_e = self.temp_e[vessel][idx - 1] if prev_node_id else self.prbl.start_times_for_vessels[vessel] + 1
prev_transport_time = self.prbl.transport_times[vessel, prev_node_id, node.id] if prev_node_id else 0
prev_loading_unloading_time = self.prbl.loading_unloading_times[vessel, prev_node_id] if prev_node_id else 0
earliest = max(node.tw_start, prev_e + prev_loading_unloading_time + prev_transport_time)
# If the precedence extension is included, earliest visiting time must also incorporate minimum waiting time
if prev_node_id and ((self.prbl.nodes[prev_node_id].zone, self.prbl.nodes[node.id].zone) in
[("red", "green"), ("red", "yellow"), ("yellow", "green")]):
earliest = max(earliest, prev_e + prev_loading_unloading_time + self.prbl.min_wait_if_sick_abs)
return earliest
def get_latest(self, idx: int, vessel: str) -> int:
route = self.temp_routes[vessel]
node = self.prbl.nodes[route[idx]]
next_node_id = route[idx + 1] if idx + 1 < len(route) else None
next_l = self.temp_l[vessel][idx + 1] if next_node_id else len(self.prbl.time_periods) - 1
next_transport_time = self.prbl.transport_times[vessel, node.id, next_node_id] if next_node_id else 0
if idx == len(route) - 1 and node.is_factory:
loading_unloading_time = 0 # zero loading time for last visited factory
else:
loading_unloading_time = self.prbl.loading_unloading_times[vessel, node.id]
latest = min(node.tw_end, next_l - next_transport_time - loading_unloading_time)
# if the last node is not a factory, make sure there is enough time to reach a factory destination
if idx == len(route) - 1 and not node.is_factory:
time_to_factory = min(self.prbl.transport_times[vessel, node.id, f] for f in self.prbl.factory_nodes)
latest = min(latest, len(self.prbl.time_periods) - time_to_factory - loading_unloading_time - 1)
# If the precedence extension is included, latest visiting time must also incorporate minimum waiting time
if next_node_id is not None and ((node.zone, self.prbl.nodes[next_node_id].zone) in
[("red", "green"), ("red", "yellow"), ("yellow", "green")]):
latest = min(latest, next_l - loading_unloading_time - self.prbl.min_wait_if_sick_abs)
return latest
def get_factory_earliest(self, factory: str, idx: int,
prev_departure_times: List[int] = None) -> Tuple[int, List[int]]:
"""
:param factory:
:param idx: The index of the visit in self.factory_visits
:param prev_departure_times: The previous visits' earliest departure times
:return: The earliest visit time based on factory constraints
(route constraints are assumed to be already calculated)
"""
quay_cap_incr_times = self.prbl.quay_cap_incr_times[factory] + [int_inf]
visits = self.temp_factory_visits[factory]
visit_indices = self.temp_factory_visits_route_index[factory]
vessel = visits[idx]
# destination factory -> zero loading time
loading_times = [self.prbl.loading_unloading_times[visits[i], factory]
* (not self._is_destination_factory(visits[i], visit_indices[i]))
for i in range(len(visits))]
curr_loading_time = loading_times[idx]
prev_e = self.temp_e[visits[idx - 1]][visit_indices[idx - 1]] if idx > 0 else -1
if not prev_departure_times:
prev_departure_times = [self.temp_e[visits[i]][visit_indices[i]] + loading_times[i] - 1
for i in range(0, idx)] + [int_inf]
idx_e = self.temp_e[vessel][visit_indices[idx]] # earliest from route
t = max(prev_e, idx_e)
# update event lists
prev_departure_times.sort()
prev_departure_times = self._ordered_list_min_threshold(prev_departure_times, t)
quay_cap_incr_times = self._ordered_list_min_threshold(quay_cap_incr_times, t)
# both quay capacity and the departures of other vessels limit insertion
# -> iterate to find first possible t where quay_vessels < min quay_capacity over loading interval
quay_vessels = len(prev_departure_times) - 1
while curr_loading_time > 0 and quay_vessels >= self._get_min_quay_capacity(factory, t, curr_loading_time):
if prev_departure_times[0] < quay_cap_incr_times[0]:
t = prev_departure_times.pop(0) + 1
quay_vessels -= 1
else:
t = quay_cap_incr_times.pop(0)
if curr_loading_time > 0:
bisect.insort(prev_departure_times, t + curr_loading_time - 1)
return t, prev_departure_times
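    # Hedged worked example (added comment, numbers are made up): with quay capacity 1
    # throughout, one earlier vessel occupying the quay through period 5 and a
    # route-based earliest of t=3, the sweep above starts at t=3, sees one vessel on
    # the quay, pops its departure and moves to t=6, where loading can start; t=6 is
    # returned together with the updated departure list.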
def get_factory_latest(self, factory: str, idx: int,
next_arrival_times: List[int] = None) -> Tuple[int, List[int]]:
"""
:param factory:
:param idx: The index of the visit in self.factory_visits
:param next_arrival_times: the succeeding visits' latest arrival times
:return: The latest visit time based on factory constraints
"""
quay_cap_decr_times = [-int_inf] + self.prbl.quay_cap_decr_times[factory]
visits = self.temp_factory_visits[factory]
visit_indices = self.temp_factory_visits_route_index[factory]
vessel = visits[idx]
# destination factory -> zero loading time
curr_loading_time = (self.prbl.loading_unloading_times[vessel, factory]
* (not self._is_destination_factory(vessel, visit_indices[idx])))
next_l = self.temp_l[visits[idx + 1]][visit_indices[idx + 1]] if idx + 1 < len(visits) else int_inf
if not next_arrival_times:
next_arrival_times = [-int_inf] + [self.temp_l[visits[i]][visit_indices[i]]
for i in range(idx + 1, len(visits))]
idx_l = self.temp_l[vessel][visit_indices[idx]] # latest from route
t = min(next_l, idx_l)
# update event lists
next_arrival_times.sort()
next_arrival_times = self._ordered_list_max_threshold(next_arrival_times, t + curr_loading_time + 1)
quay_cap_decr_times = self._ordered_list_max_threshold(quay_cap_decr_times, t + curr_loading_time + 1)
# both quay capacity and the departures of other vessels limit insertion
# -> iterate to find first possible t where quay_vessels < min quay_capacity over loading interval
quay_vessels = len(next_arrival_times) - 1
while curr_loading_time > 0 and quay_vessels >= self._get_min_quay_capacity(factory, t, curr_loading_time):
if next_arrival_times[-1] >= quay_cap_decr_times[-1]:
t = next_arrival_times.pop() - curr_loading_time
quay_vessels -= 1
else:
t = quay_cap_decr_times.pop() - curr_loading_time
if curr_loading_time > 0:
bisect.insort(next_arrival_times, t)
return t, next_arrival_times
def check_earliest_forward(self, vessel: str, idx: int, force_propagation: bool = False) -> bool:
"""Iteratively checks earliest time for succeeding visits until no change"""
route = self.temp_routes[vessel]
for i in range(idx, len(route)):
e = self.get_earliest(i, vessel)
updated_e = e > self.temp_e[vessel][i]
            self.temp_e[vessel][i] = e if updated_e else self.temp_e[vessel][i]  # update temp_e if stronger bound is found
if self.temp_l[vessel][i] < e: # insertion squeezes out succeeding node
if self.verbose:
print(f"Check failed at: check_earliest_forward for {vessel}. "
f"Forward from route index {idx} at {i}: ({e}, {self.temp_l[vessel][i]})")
return False
node = self.prbl.nodes[route[i]]
if node.is_factory: # check routes visiting the same factory before than this visit
factory_visit_idx = self._get_factory_visit_idx(node.id, vessel, i)
if not self.check_factory_visits_earliest_forward(node.id, factory_visit_idx + 1):
return False # the changed e for the factory resulted in a node on another route being squeezed out
if not updated_e and not force_propagation: # propagation of check stops if e is unchanged
break
return True
def check_latest_backward(self, vessel: str, idx: int, force_propagation: bool = False) -> bool:
"""Iteratively checks latest time for preceding visits until no change"""
route = self.temp_routes[vessel]
for i in range(idx - 1, -1, -1):
l = self.get_latest(i, vessel)
updated_l = l < self.temp_l[vessel][i]
self.temp_l[vessel][i] = l if updated_l else self.temp_l[vessel][i]
# update temp_l if stronger bound is found
if l < self.temp_e[vessel][i]: # insertion squeezes out preceding node
if self.verbose:
print(f"Check failed at: check_latest_backward for {vessel}. "
f"Backward from route index {idx} at {i}: ({self.temp_e[vessel][i]}, {l})")
return False
node = self.prbl.nodes[route[i]]
if node.is_factory: # check routes visiting the same factory after than this visit
factory_visit_idx = self._get_factory_visit_idx(node.id, vessel, i)
if not self.check_factory_visits_latest_backward(node.id, factory_visit_idx):
return False # the changed l for the factory resulted in a node on another route being squeezed out
if not updated_l and not force_propagation: # propagation of check stops if l is unchanged
break
return True
def check_factory_visits_earliest_forward(self, factory: str, idx: int, prev_dep_times: List[int] = None,
force_propagation: bool = False) -> bool:
"""
Iteratively checks a factory visits' earliest arrival times for succeeding vessels until violation or no overlap
:param factory:
:param idx: the factory visit index to check from
:param prev_dep_times:
:return: True if there is no constraint violation
"""
for i in range(idx, len(self.factory_visits[factory])):
vessel = self.temp_factory_visits[factory][i]
route_index = self.temp_factory_visits_route_index[factory][i]
e, prev_dep_times = self.get_factory_earliest(factory, i, prev_departure_times=prev_dep_times)
updated_e = e > self.temp_e[vessel][route_index]
if updated_e:
self.temp_e[vessel][route_index] = e
if self.temp_l[vessel][route_index] < e:
if self.verbose:
print(f"Check failed at: check_factory_visits_earliest_forward for {factory}. "
f"Forward from factory visit {idx} at {i}: : ({e}, {self.temp_l[vessel][route_index]})")
return False # not enough time space for insertion
if updated_e:
self.temp_e[vessel][route_index] = e
if not self.check_earliest_forward(vessel, route_index + 1):
return False
if max(prev_dep_times[:-1], default=-int_inf) < self.temp_e[vessel][route_index] and not force_propagation:
break # stop propagation if no overlap between visit i and previous departures
return True
def check_factory_visits_latest_backward(self, factory: str, idx: int, next_arr_times: List[int] = None,
force_propagation: bool = False) -> bool:
"""
Iteratively checks a factory visits' latest arrival times for preceding vessels until violation or no overlap
:param factory:
:param idx: the factory visit index to check from
:param next_arr_times:
:return: True if there is no constraint violation
"""
for i in range(idx, -1, -1):
vessel = self.temp_factory_visits[factory][i]
route_index = self.temp_factory_visits_route_index[factory][i]
l, next_arr_times = self.get_factory_latest(factory, i, next_arrival_times=next_arr_times)
updated_l = l < self.temp_l[vessel][route_index]
if l < self.temp_e[vessel][route_index]:
if self.verbose:
print(f"Check failed at: check_factory_visits_latest_backward for {factory}. "
f"Backward from factory visit {idx} at {i}: ({self.temp_e[vessel][route_index]}, {l})")
return False # not enough time space for insertion
if updated_l:
self.temp_l[vessel][route_index] = l
if not self.check_latest_backward(vessel, route_index):
return False
if self.temp_l[vessel][route_index] < min(next_arr_times[-1:], default=int_inf) and not force_propagation:
break # stop propagation if no overlap between visit i and next arrivals
return True
def check_time_feasibility(self, insert_node_id: str, vessel: str, idx: int, noise_factor: float = 0.0) -> bool:
route = self.temp_routes[vessel]
insert_node = self.prbl.nodes[insert_node_id]
idx = len(route) + idx + 1 if idx < 0 else idx # transform negative indexes
# Increase route indexes for factory visits succeeding the insert
for factory in self.prbl.factory_nodes:
for i, v in enumerate(self.temp_factory_visits[factory]):
if v == vessel and idx <= self.temp_factory_visits_route_index[factory][i]:
self.temp_factory_visits_route_index[factory][i] += 1
self.temp_routes[vessel].insert(idx, insert_node_id)
self.temp_e[vessel].insert(idx, insert_node.tw_start) # initial value
self.temp_l[vessel].insert(idx, insert_node.tw_end) # initial value
e = self.get_earliest(idx, vessel)
l = self.get_latest(idx, vessel)
if l < e: # not enough time space for insertion
if self.verbose:
print(f"Check failed at: check_time_feasibility for {vessel}: ({e}, {l})")
return False
self.temp_e[vessel][idx] = e
self.temp_l[vessel][idx] = l
if insert_node.is_factory:
factory_visit_idx = self._get_factory_visit_insert_idx(insert_node_id, e)
self.temp_factory_visits[insert_node_id].insert(factory_visit_idx, vessel)
self.temp_factory_visits_route_index[insert_node_id].insert(factory_visit_idx, idx)
if not self.check_latest_backward(vessel, idx):
return False
if not self.check_earliest_forward(vessel, idx + 1):
return False
if insert_node.is_factory:
e, prev_dep_times = self.get_factory_earliest(insert_node_id, factory_visit_idx)
l, next_arr_times = self.get_factory_latest(insert_node_id, factory_visit_idx)
if l < e:
if self.verbose:
print(f"Check failed at: check_time_feasibility (factory check) for {vessel}: ({e}, {l})")
return False
self.temp_e[vessel][idx] = max(e, self.temp_e[vessel][idx])
self.temp_l[vessel][idx] = min(l, self.temp_l[vessel][idx])
if not self.check_factory_visits_earliest_forward(insert_node_id, factory_visit_idx + 1, prev_dep_times):
return False
if not self.check_factory_visits_latest_backward(insert_node_id, factory_visit_idx, next_arr_times):
return False
# if node is inserted after a destination factory, we must update e for this factory's visits
# (the factory destination does now have a loading time as it is no longer a destination)
if (idx == len(route) - 1 and self.prbl.nodes[route[-2]].is_factory
and not self.check_factory_visits_earliest_forward(route[-2], idx - 1)): # NB: Changed
return False
# if an order is inserted at the end of the route, insert a new if possible factory destination
if (idx == len(route) - 1 and not insert_node.is_factory
and not self.check_and_set_destination_factory(vessel, noise_factor)):
return False
return True
def check_and_set_destination_factory(self, vessel: str, noise_factor: float = 0.0) -> bool:
"""Picks a destination factory for the route in a greedy manner, but with noise applied"""
route = self.temp_routes[vessel]
factory_destination_options = [(factory_node, self.get_insertion_utility(factory_node, vessel, len(route),
noise_factor))
for factory_node in self.prbl.factory_nodes.values()]
factory_destination_options.sort(key=lambda item: item[1], reverse=True)
# perform changes in a copy, to distinguish temp changes related to factory destination insertion checks from
# those related to the original insertion
copy_sol = self.copy()
for factory_node, _ in factory_destination_options:
if copy_sol.check_insertion_feasibility(factory_node.id, vessel, len(route)):
self._set_temp_vars_to_solution(copy_sol) # move update to self.temp
return True
else:
copy_sol = self.copy()
if self.verbose:
print(f"Check failed at: check_and_set_destination_factory for {vessel}")
return False
def check_external_depot_feasibility(self, node_id: str, vessel: str, idx: int):
prev_factory = self.routes[vessel][self.get_voyage_start_idx(vessel, idx)]
if prev_factory in self.prbl.exernal_depot_ids:
node_demand = self.prbl.nodes[node_id].demand
return all(init_inv_p >= loaded_amount_p + demand_p
for demand_p, init_inv_p, loaded_amount_p in
zip(node_demand, self.prbl.init_inventory_lists[prev_factory],
self.get_factory_loaded_amount(prev_factory)))
else:
return True
def check_final_factory_destination_feasibility(self, vessel: str, idx: int):
# Check assumes that the insertion is already present in temp variables
node_id = self.temp_routes[vessel][idx]
node = self.prbl.nodes[node_id]
if node.is_factory:
return (len([v for v in self.prbl.vessels if
(self.temp_routes[v][-1] == node_id)]) # and len(self.temp_routes[v]) > 1)])
<= self.prbl.factory_max_vessels_destination[node_id])
else: # destination factories are unchanged or 'removed'
return True
def check_load_feasibility(self, insert_node: Node, vessel: str, idx: int) -> bool:
route = self.routes[vessel]
voyage_start, voyage_end = self.get_voyage_start_end_idx(vessel, idx)
voyage_demand = sum(d for node_id in route[voyage_start:voyage_end] for d in self.prbl.nodes[node_id].demand)
return voyage_demand + sum(d for d in insert_node.demand) <= self.prbl.vessel_ton_capacities[vessel]
def check_no_products_feasibility(self, insert_node: Node, vessel: str, idx: int) -> bool:
if self.prbl.vessel_nprod_capacities[vessel] >= len(self.prbl.products) or insert_node.is_factory:
return True
route = self.routes[vessel]
voyage_start, voyage_end = self.get_voyage_start_end_idx(vessel, idx)
voyage_demanded_products = [any(self.prbl.nodes[node_id].demand[p]
for node_id in route[voyage_start + 1:voyage_end])
for p in range(len(self.prbl.products))]
insert_node_demanded_products = [bool(d) for d in insert_node.demand]
combined_demanded_products = np.logical_or(voyage_demanded_products, insert_node_demanded_products)
return sum(combined_demanded_products) <= self.prbl.vessel_nprod_capacities[vessel]
def check_node_for_vessel_feasibility(self, insert_node: Node, vessel: str) -> bool:
return self.prbl.nodes_for_vessels[(vessel, insert_node.id)] == 1
def check_production_feasibility(self, vessel: str = None, idx: int = None) -> Tuple[bool, str]:
factories_to_check: List[str] = []
if vessel and idx:
for f in self.prbl.factory_nodes.keys():
if (not self.prbl.nodes[self.temp_routes[vessel][idx]].is_factory and
self.get_temp_voyage_start_factory(vessel=vessel, idx=idx) == f): # added order picked up at f
factories_to_check.append(f)
elif self.is_factory_latest_changed_in_temp(
f): # factory may have to be ready for vessel loading earlier
factories_to_check.append(f)
else:
factories_to_check = list(self.prbl.factory_nodes.keys())
# Feasibility is checked for relevant factories
for factory_node_id in factories_to_check:
demands = self.get_demand_dict(relevant_factories=[factory_node_id]) # (factory_node_id, p, t): demand
pickups: List[Tuple[int, List[int]]] = [] # [latest_pickup_time, [demand for each product]]
for t in self.prbl.time_periods:
if sum(demands[factory_node_id, p, t] for p in self.prbl.products) > 0:
pickups.append((t, [demands[factory_node_id, p, t] for p in self.prbl.products]))
pickups.sort(key=lambda tup: tup[0]) # sort according to latest pickup time (asc)
latest_loading_times = np.array([pickup[0] for pickup in pickups])
products_for_voyage = np.array([pickup[1] for pickup in pickups])
# Make cumulative representation
production_requirement_cum = np.cumsum(products_for_voyage, axis=0)
# Find the minimum number of activities that must be undertaken before a given loading event
activity_requirement_cum = np.copy(production_requirement_cum)
production_lines = [l for (i, l) in self.prbl.production_lines_for_factories if i == factory_node_id]
# filter(lambda x: x[0] == factory_node_id, self.prbl.production_lines_for_factories)]
for p in range(len(self.prbl.products)): # for all columns in the matrix
initial_inventory = self.prbl.factory_initial_inventories[(factory_node_id, self.prbl.products[p])]
production_capacity_max = max(
[self.prbl.production_max_capacities[l, self.prbl.products[p]] for l in production_lines] + [0])
for k in range(np.shape(production_requirement_cum)[0]):
if production_capacity_max > 0:
activity_requirement = np.ceil((production_requirement_cum[k, p] - initial_inventory) /
production_capacity_max)
activity_requirement_cum[k][p] = max(0, activity_requirement)
else:
if production_requirement_cum[k, p] > initial_inventory:
return False, factory_node_id
activity_requirement_cum[k][p] = 0
for k in range(len(activity_requirement_cum) - 1, 0, -1):
production_time_periods = len(production_lines) * sum([self.prbl.production_stops[factory_node_id, t]
for t in range(latest_loading_times[k - 1],
latest_loading_times[k])])
for i in range(max(np.sum(activity_requirement_cum[k], axis=0)
- np.sum(activity_requirement_cum[k - 1], axis=0)
- production_time_periods,
0)): # number of activities in this interval exceeding production_time_periods
for p in range(np.shape(production_requirement_cum)[1]):
if activity_requirement_cum[k][p] > 0:
activity_requirement_cum[k - 1][p] += 1 # pushing production activities to occur earlier
break
# If pushing production activities earlier results in too much production taking place
# before the first loading, then the production schedule is infeasible
if len(latest_loading_times) > 0: # if there are orders to be delivered at all
latest = latest_loading_times[0]
first_production_time_periods = len(production_lines) * sum(
[self.prbl.production_stops[factory_node_id, t]
for t in range(latest + 1)])
if (first_production_time_periods <
self.ppfc_slack_factor * np.sum(activity_requirement_cum[0], axis=None)):
if self.verbose:
print(f"check_production_feasibility failed on production for {factory_node_id}")
return False, factory_node_id
# Checking for inventory feasibility
# Removed this - cannot _prove_ infeasibility (could pick up at earliest point in time instead)
# for k in range(np.shape(activity_requirement_cum)[0]): # for all rows in the array
# production_capacity_min = min([self.prbl.production_min_capacities[l, p]
# for l in production_lines
# for p in self.prbl.products])
# inventory = (np.sum(activity_requirement_cum[k], axis=0) * production_capacity_min +
# np.sum([self.prbl.factory_initial_inventories[factory_node_id, p]
# for p in self.prbl.products]))
# if k > 0: # subtract previous loadings
# inventory = inventory - np.sum(products_for_voyage[:k])
# if inventory > self.prbl.factory_inventory_capacities[factory_node_id]:
# if self.verbose:
# print(f"check_production_feasibility failed on inventory for {factory_node_id}")
# return False
return True, ''
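    # Hedged worked example (added comment, numbers are made up) of the cumulative check
    # above: with initial inventory 100, one production line with max capacity 50 per
    # period and cumulative pickup requirements [120, 200] for a single product, the
    # activity requirements become ceil((120-100)/50)=1 and ceil((200-100)/50)=2, so at
    # least one production period must fit before the first loading and two in total
    # before the second (activities that do not fit between the two loadings are pushed
    # before the first one); otherwise (False, factory_id) is returned.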
def get_demand_dict(self, relevant_factories: List[str] = None) -> Dict[Tuple[str, str, int], int]:
demands: Dict[Tuple[str, str, int], int] = {} # (i, p, t): demand
factories = relevant_factories if relevant_factories else [k for k in self.prbl.factory_nodes.keys()]
time_horizon = max(self.prbl.time_periods)
for factory_node_id in factories:
# List of tuples: (vessel, route_idx, latest)
visits: List[Tuple[str, int, int]] = [(self.temp_factory_visits[factory_node_id][i],
self.temp_factory_visits_route_index[factory_node_id][i],
self.temp_l[self.temp_factory_visits[factory_node_id][i]][
self.temp_factory_visits_route_index[factory_node_id][i]])
for i in range(len(self.temp_factory_visits[factory_node_id]))]
for (v, idx, l) in visits:
voyage_end_idx = self.get_temp_voyage_end_idx(vessel=v, start_idx=idx)
if l > time_horizon: # factory visit is not for product pickup
continue
demand = {self.prbl.products[i]: sum(self.prbl.nodes[j].demand[i]
for j in self.temp_routes[v][idx + 1:voyage_end_idx])
for i in range(len(self.prbl.products))}
for p in demand.keys():
if (factory_node_id, p, l) in demands.keys():
demands[(factory_node_id, p, l)] += demand[p]
else:
demands[(factory_node_id, p, l)] = demand[p]
for p in self.prbl.products:
for t in self.prbl.time_periods:
if (factory_node_id, p, t) not in demands.keys():
demands[(factory_node_id, p, t)] = 0
return demands
def remove_node(self, vessel: str, idx: int):
node = self.prbl.nodes[self.routes[vessel][idx]]
if node.is_factory:
factory_visit_idx = self._get_factory_visit_idx(node.id, vessel, idx)
self.factory_visits[node.id].pop(factory_visit_idx)
self.factory_visits_route_index[node.id].pop(factory_visit_idx)
self.temp_factory_visits[node.id].pop(factory_visit_idx)
self.temp_factory_visits_route_index[node.id].pop(factory_visit_idx)
self.routes[vessel].pop(idx)
self.e[vessel].pop(idx)
self.l[vessel].pop(idx)
self.temp_routes[vessel].pop(idx)
self.temp_e[vessel].pop(idx)
self.temp_l[vessel].pop(idx)
def recompute_solution_variables(self):
# recompute factory visit route indexes
self.factory_visits_route_index = self.recompute_factory_visits_route_idx()
self.temp_factory_visits_route_index = {factory: route_indexes[:]
for factory, route_indexes in self.factory_visits_route_index.items()}
        # remove consecutive factory visits
self.remove_consecutive_factories()
# recompute factory visit route indexes again after consecutive removals
self.factory_visits_route_index = self.recompute_factory_visits_route_idx()
self.temp_factory_visits_route_index = {factory: route_indexes[:]
for factory, route_indexes in self.factory_visits_route_index.items()}
# "open up" temp_e and temp_l to original time window
for vessel, route in self.routes.items():
for idx, node_id in enumerate(route):
self.temp_e[vessel][idx] = self.prbl.nodes[node_id].tw_start
self.temp_l[vessel][idx] = self.prbl.nodes[node_id].tw_end
# recompute new e and l for routes
for vessel, route in self.routes.items():
self.check_earliest_forward(vessel, 0, force_propagation=True)
self.check_latest_backward(vessel, len(route) - 1, force_propagation=True)
# recompute new e and l for factory visits
for factory, factory_visits in self.factory_visits.items():
self.check_factory_visits_earliest_forward(factory, 0, force_propagation=True)
self.check_factory_visits_latest_backward(factory, len(factory_visits) - 1, force_propagation=True)
# move updates from temp to main variables
self.insert_last_checked()
def remove_consecutive_factories(self) -> None:
illegal_route_removals = self.get_illegal_route_removals()
for vessel, route in self.temp_routes.items():
if len(route) < 2:
continue
elif len(route) == 2 and self.prbl.nodes[route[-1]].is_factory and vessel not in illegal_route_removals:
self.remove_node(vessel, 1)
continue
# iterate backwards so that we can delete without messing up indexes
for idx in range(len(route) - 1, 0, -1):
curr_node = route[idx]
preceding_node = route[idx - 1]
# remove the second visit if the route's two first factory visits are consecutive
remove_idx = max(1, idx)
if preceding_node == curr_node and self.prbl.nodes[curr_node].is_factory:
self.remove_node(vessel, remove_idx)
def recompute_factory_visits_route_idx(self) -> Dict[str, List[int]]:
# infer factory visit indexes from factory visits and routes
factory_visits_route_idx = {factory: [] for factory in self.prbl.factory_nodes}
for factory, factory_visits in self.factory_visits.items():
vessel_prev_factory_idx = {vessel: -1 for vessel in self.prbl.vessels}
for vessel in factory_visits:
route_idx = self.routes[vessel].index(factory, vessel_prev_factory_idx[vessel] + 1)
vessel_prev_factory_idx[vessel] = route_idx
factory_visits_route_idx[factory].append(route_idx)
return factory_visits_route_idx
def get_solution_routing_cost(self) -> int:
transport_times = self.prbl.transport_times_exact
transport_cost = sum(transport_times[vessel, route[i - 1], route[i]] * self.prbl.transport_unit_costs[vessel]
for vessel, route in self.routes.items()
for i in range(1, len(route)))
unmet_order_cost = sum(self.prbl.external_delivery_penalties[order_node]
for order_node in self.get_orders_not_served())
return round(transport_cost + unmet_order_cost)
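    # Added note: the routing objective computed above is
    #   sum_v sum_{i>0} C_v * T[v, route_v[i-1], route_v[i]]  +  sum_{o not served} penalty_o,
    # i.e. exact sailing cost over every leg of every route plus the external delivery
    # penalty for each order left unserved.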
def get_solution_transport_cost(self) -> int:
transport_times = self.prbl.transport_times_exact
transport_cost = sum(transport_times[vessel, route[i - 1], route[i]] * self.prbl.transport_unit_costs[vessel]
for vessel, route in self.routes.items()
for i in range(1, len(route)))
return transport_cost
def get_route_profit(self, vessel: str):
route = self.routes[vessel]
transport_times = self.prbl.transport_times_exact
order_profits = sum(self.prbl.external_delivery_penalties[node_id]
for node_id in route if not self.prbl.nodes[node_id].is_factory)
transport_cost = sum(transport_times[vessel, route[i - 1], route[i]] * self.prbl.transport_unit_costs[vessel]
for i in range(1, len(route)))
return order_profits - transport_cost
def get_voyage_profit(self, vessel: str, voyage_start_idx: int):
route = self.routes[vessel]
transport_times = self.prbl.transport_times_exact
voyage_indexes = [i for i in range(self.get_temp_voyage_end_idx(vessel, voyage_start_idx))]
node_before = route[max(0, voyage_indexes[0] - 1)]
node_after = route[min(len(route) - 1, voyage_indexes[-1] + 1)]
order_profits = sum(self.prbl.external_delivery_penalties[route[i]]
for i in voyage_indexes if not self.prbl.nodes[route[i]].is_factory)
transport_cost = sum(transport_times[vessel, route[i], route[i + 1]] * self.prbl.transport_unit_costs[vessel]
for i in voyage_indexes)
new_transport_cost = transport_times[vessel, node_before, node_after] * self.prbl.transport_unit_costs[vessel]
return order_profits - transport_cost + new_transport_cost
def get_insertion_utility(self, node: Node, vessel: str, idx: int,
noise_factor: float = 0) -> float: # High utility -> good insertion
route = self.temp_routes[vessel]
transport_times = self.prbl.transport_times_exact
if idx < len(self.temp_routes[vessel]) - 1: # node to be inserted is not at end of route
net_sail_change = (
transport_times[vessel, route[idx - 1], node.id] + transport_times[vessel, node.id, route[idx]]
- transport_times[vessel, route[idx - 1], route[idx]])
elif not node.is_factory: # insert order at the end of route
# assuming that vessel must sail back to the destination factory afterwards
net_sail_change = 2 * transport_times[vessel, route[idx - 1], node.id]
else:
net_sail_change = transport_times[vessel, route[idx - 1], node.id]
delivery_gain = self.prbl.external_delivery_penalties[node.id] if not node.is_factory else 0
noise = noise_factor * random.randrange(-self.prbl.max_transport_cost[vessel],
self.prbl.max_transport_cost[vessel])
return delivery_gain - net_sail_change * self.prbl.transport_unit_costs[vessel] + noise
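    # Hedged worked example (added comment, numbers are made up): inserting order o
    # between i and j with sailing times T[i,o]=4, T[o,j]=3, T[i,j]=5 gives a net sail
    # change of 4 + 3 - 5 = 2 periods; with unit cost 10 and an external delivery
    # penalty of 100, the noise-free utility is 100 - 2 * 10 = 80.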
def get_removal_utility(self, vessel: str, idx: int) -> float: # High utility -> good removal ("remove worst node")
route = self.routes[vessel]
transport_times = self.prbl.transport_times_exact
if idx >= len(route):
print("Index", idx, "does not exist for vessel", vessel)
return -1
net_sail_change = - transport_times[vessel, route[idx - 1], route[idx]]
if idx < len(self.routes[vessel]) - 1:
net_sail_change += (transport_times[vessel, route[idx - 1], route[idx + 1]]
- transport_times[vessel, route[idx], route[idx + 1]])
delivery_penalty = (self.prbl.external_delivery_penalties[route[idx]]
if not self.prbl.nodes[route[idx]].is_factory else 0)
return - (delivery_penalty + net_sail_change * self.prbl.transport_unit_costs[vessel])
def get_temp_voyage_start_idxs_for_factory(self, factory_node_id: str) -> Dict[str, List[Tuple[int, int]]]:
"""
:param factory_node_id:
:return: {vessel: (factory visit index in route, latest loading start time)} for the input factory
if vessel visits input factory for loading
"""
voyage_start_idxs_for_vessels: Dict[str, List[Tuple[int, int]]] = {}
for v in self.prbl.vessels:
voyage_start_idxs_for_vessels[v] = []
route = self.temp_routes[v]
# Adding index and latest loading time for vessels loading at input factory
for i in range(len(route) - 1): # last element in route is not included, as it cannot be a voyage start
if self.prbl.nodes[route[i]].id == factory_node_id:
voyage_start_idxs_for_vessels[v].append(
tuple((i, self.temp_l[v][i])))
# Vessel does not load at input factory -> vessel is skipped
if len(voyage_start_idxs_for_vessels[v]) == 0:
voyage_start_idxs_for_vessels.pop(v, None)
return voyage_start_idxs_for_vessels
def get_factory_loaded_amount(self, factory_id: str) -> List[int]:
load = np.zeros(len(self.prbl.products))
for v, voyage_start_idx in zip(self.factory_visits[factory_id], self.factory_visits_route_index[factory_id]):
i = voyage_start_idx + 1
while i < len(self.routes[v]) and not self.prbl.nodes[self.routes[v][i]].is_factory:
load += np.array(self.prbl.nodes[self.routes[v][i]].demand)
i += 1
return list(load)
def get_order_vessel_idx_for_factory(self, factory_node_id: str) -> List[Tuple[str, int]]:
return [(vessel, idx)
for vessel, voyage_start_idx in zip(self.factory_visits[factory_node_id],
self.factory_visits_route_index[factory_node_id])
for idx in range(voyage_start_idx + 1, self.get_temp_voyage_end_idx(vessel, voyage_start_idx))]
def is_factory_latest_changed_in_temp(self, factory_node_id: str) -> bool:
if (self.temp_factory_visits[factory_node_id] != self.factory_visits[factory_node_id] or
self.temp_factory_visits_route_index[factory_node_id] != self.factory_visits_route_index[
factory_node_id] or
not all(self.temp_l[self.temp_factory_visits[factory_node_id][i]][
self.temp_factory_visits_route_index[factory_node_id][i]] ==
self.l[self.factory_visits[factory_node_id][i]][
self.factory_visits_route_index[factory_node_id][i]]
for i in range(len(self.factory_visits[factory_node_id])))):
return True
return False
def get_voyage_start_end_idx(self, vessel: str, idx: int) -> Tuple[int, int]:
route = self.temp_routes[vessel]
voyage_start_idx = -1
voyage_end_idx = math.inf
for i in range(idx - 1, -1, -1):
if self.prbl.nodes[route[i]].is_factory:
voyage_start_idx = i
break
for i in range(idx, len(route)):
if self.prbl.nodes[route[i]].is_factory:
voyage_end_idx = i
break
# if no destination factory yet, end voyage at the end of route:
voyage_end_idx = min(voyage_end_idx, len(route))
assert voyage_start_idx != -1, "Illegal voyage, no initial factory"
return voyage_start_idx, voyage_end_idx
def get_voyage_start_idx(self, vessel: str, idx: int) -> int:
route = self.temp_routes[vessel]
voyage_start_idx = -1
for i in range(idx - 1, -1, -1):
if self.prbl.nodes[route[i]].is_factory:
voyage_start_idx = i
break
assert voyage_start_idx != -1, "Illegal voyage, no initial factory"
return voyage_start_idx
def get_temp_voyage_end_idx(self, vessel: str, start_idx: int) -> int:
route = self.temp_routes[vessel]
for i in range(start_idx + 1, len(route)):
if self.prbl.nodes[route[i]].is_factory:
return i
return len(route)
def get_temp_voyage_start_factory(self, vessel: str, idx: int) -> str:
route = self.temp_routes[vessel]
for i in range(idx - 1, 0, -1):
if self.prbl.nodes[route[i]].is_factory:
return self.temp_routes[vessel][i]
return self.temp_routes[vessel][0]
def get_illegal_route_removals(self) -> List[str]:
illegals = []
factory_dest_count = {f: 0 for f in self.prbl.factory_nodes}
for vessel, route in self.routes.items():
factory_dest_count[route[-1]] += 1
for vessel, route in self.routes.items():
if len(route) == 1:
continue
f_first = route[0]
f_last = route[-1]
            if (f_first != f_last
                    and factory_dest_count[f_first] >= self.prbl.factory_max_vessels_destination[f_first]):
illegals.append(vessel)
return illegals
def get_illegal_voyage_removals(self) -> List[Tuple[str, int]]:
illegal_removals = []
f_dest_count = {f: 0 for f in self.prbl.factory_nodes}
for vessel, route in self.routes.items():
f_dest_count[route[-1]] += 1
for vessel, route in self.routes.items():
if len(route) == 1:
continue
last_voy_start_idx = 0 if len(route) == 2 else self.get_voyage_start_idx(vessel, len(route) - 2)
f_last_voy_start = route[last_voy_start_idx]
f_last = route[-1]
            if (f_last_voy_start != f_last
                    and f_dest_count[f_last_voy_start] >= self.prbl.factory_max_vessels_destination[f_last_voy_start]):
illegal_removals.append((vessel, last_voy_start_idx))
return illegal_removals
def _init_factory_visits(self) -> Dict[str, List[str]]:
vessel_starting_times = list(self.prbl.start_times_for_vessels.items())
random.shuffle(vessel_starting_times) # add randomness for equal start times
vessel_starting_times.sort(key=lambda item: item[1])
factory_visits: Dict[str, List[str]] = {i: [] for i in self.prbl.factory_nodes}
for vessel, _ in vessel_starting_times:
initial_factory = self.prbl.vessel_initial_locations[vessel]
factory_visits[initial_factory].append(vessel)
return factory_visits
def _get_factory_visit_earliest_if_swapped(self, factory: str) -> List[int]:
"""Get the earliest time for an factory visit if the visit was swapped with the previous vessel visiting the
factory"""
quay_vessels = 0
quay_cap_incr_times = self.prbl.quay_cap_incr_times[factory] + [int_inf]
first_vessel = self.factory_visits[factory][0]
t = self.prbl.start_times_for_vessels[first_vessel] + 1
earliest = [t]
next_departure_t = int_inf
for i, vessel in enumerate(self.factory_visits[factory][:-1]):
next_vessel = self.factory_visits[factory][i + 1]
t = max(t, self.prbl.start_times_for_vessels[vessel] + 1)
loading_time = (self.prbl.loading_unloading_times[next_vessel, factory] * # next vessel's loading time
self._is_destination_factory(vessel, self.factory_visits_route_index[factory][i + 1]))
while quay_vessels >= self._get_min_quay_capacity(factory, t, loading_time):
if next_departure_t < quay_cap_incr_times[0] and quay_vessels > 0:
t = next_departure_t + 1
quay_vessels -= 1
else:
t = quay_cap_incr_times.pop(0)
earliest.append(max(t, self.prbl.start_times_for_vessels[next_vessel] + 1))
quay_vessels += 1
if len(self.routes[vessel]) > 1:
next_departure_t = t + self.prbl.loading_unloading_times[vessel, factory]
else:
next_departure_t = t
return earliest
def permute_factory_visits(self) -> bool:
# Find candidate pairs of vessels for permutation
cand_pairs = []
factories_with_visits = [f for f, visits in self.factory_visits.items() if visits]
for factory in factories_with_visits:
earliest_if_swapped_with_previous = self._get_factory_visit_earliest_if_swapped(factory)
for i, vessel in enumerate(self.factory_visits[factory][:-1]):
if (self.factory_visits_route_index[factory][i] != 0 or
self.factory_visits_route_index[factory][i + 1] != 0):
continue # skip if one of the factory visits are not the first for their route
next_vessel = self.factory_visits[factory][i + 1]
next_e = earliest_if_swapped_with_previous[i + 1] # last visits earliest if swapped
curr_loading_time = self.prbl.loading_unloading_times[vessel, factory]
next_loading_time = self.prbl.loading_unloading_times[next_vessel, factory]
# Assuming max 1 vessel may load simultaneously TODO?
min_quay_cap = self._get_min_quay_capacity(factory, next_e, next_loading_time + curr_loading_time)
if all([min_quay_cap > 0,
vessel != next_vessel,
next_e + next_loading_time < self.temp_l[vessel][0],
self.temp_e[vessel][0] + curr_loading_time >= self.prbl.start_times_for_vessels[next_vessel]]):
                    cand_pairs.append((factory, vessel, next_vessel, i,
                                       (self.temp_e[vessel][0], self.temp_l[vessel][0]),
                                       (next_e, self.temp_l[next_vessel][0])))
# print("Cands:", cand_pairs)
if not cand_pairs:
return False
# Choose a pair to permute
f, _, _, i, _, _ = random.choice(cand_pairs)
# Swap factory visits for the chosen pair
self.factory_visits[f][i], self.factory_visits[f][i+1] = self.factory_visits[f][i+1], self.factory_visits[f][i]
self.temp_factory_visits = {f: visits[:] for f, visits in self.factory_visits.items()}
self.recompute_solution_variables()
return True
def get_initial_factory_visits(self, factory: str):
init_factory_visits = []
for vessel, route_idx in zip(self.factory_visits[factory], self.factory_visits_route_index[factory]):
if route_idx == 0:
init_factory_visits.append(vessel)
return tuple(init_factory_visits)
def _is_destination_factory(self, vessel: str, route_index: int) -> bool:
return route_index == len(self.temp_routes[vessel]) - 1
def _get_min_quay_capacity(self, factory: str, t: int, loading_time: int):
"""Helper function that defines min_quay_capacity for t < 0, t >= no_time_periods and loading_time=0"""
no_time_periods = len(self.prbl.quay_capacity[factory])
if t + loading_time <= 0:
return self.prbl.quay_capacity[factory][0]
elif t < no_time_periods and loading_time:
return min(self.prbl.quay_capacity[factory][max(0, tau)]
for tau in range(t, min(no_time_periods, t + loading_time)))
elif t < no_time_periods and not loading_time:
return self.prbl.quay_capacity[factory][t]
else: # t >= no_time_periods
return self.prbl.quay_capacity[factory][-1]
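    # Hedged example (added comment): with quay_capacity = [1, 1, 2, 2], a call with
    # t + loading_time <= 0 returns the first period's capacity, an in-horizon call with
    # loading_time > 0 returns the minimum capacity over [t, t + loading_time), and a
    # call with t past the horizon returns the last period's capacity (assumed to persist).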
def _get_factory_visit_insert_idx(self, factory: str, earliest: int):
visits = self.temp_factory_visits[factory]
visit_indices = self.temp_factory_visits_route_index[factory]
# insertion where all previous factory visits have e < earliest
i = bisect.bisect([self.temp_e[v][visit_indices[i]] for i, v in enumerate(visits)], earliest)
return i
def _get_factory_visit_idx(self, factory: str, vessel: str, route_idx: int) -> int:
# route_idx = len(self.routes[vessel]) + route_idx - 1 if route_idx < 0 else route_idx
for i, (v, idx) in enumerate(
zip(self.temp_factory_visits[factory], self.temp_factory_visits_route_index[factory])):
if v == vessel and idx == route_idx:
return i
raise IndexError(f"Route index {route_idx} does not exist for {vessel} visiting {factory}")
@staticmethod
def _ordered_list_min_threshold(ordered_list: List[int], min_threshold: int) -> List[int]:
"""Prunes the ordered_list to only contain values >= min_threshold"""
i = bisect.bisect(ordered_list, min_threshold)
return ordered_list[i:]
@staticmethod
def _ordered_list_max_threshold(ordered_list: List[int], max_threshold: int) -> List[int]:
"""Prunes the ordered_list to only contain values <= max_threshold"""
i = bisect.bisect_left(ordered_list, max_threshold)
return ordered_list[:i]
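    # Hedged worked example (added comment) for the bisect-based helpers above:
    #   _ordered_list_min_threshold([2, 4, 4, 7], 4) -> [7]   (keeps values > 4)
    #   _ordered_list_max_threshold([2, 4, 4, 7], 4) -> [2]   (keeps values < 4)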
def _set_temp_vars_to_solution(self, solution: Solution) -> None:
self.temp_routes = solution.temp_routes
self.temp_e = solution.temp_e
self.temp_l = solution.temp_l
self.temp_factory_visits = solution.temp_factory_visits
self.temp_factory_visits_route_index = solution.temp_factory_visits_route_index
def get_orders_not_served(self) -> List[str]:
served_orders = set(o for v in self.prbl.vessels for o in self.routes[v]
if not self.prbl.nodes[o].is_factory)
unserved_orders = list(set(self.prbl.order_nodes) - served_orders)
return unserved_orders
def get_y_dict(self) -> Dict[Tuple[str, str, int], int]:
y_init_dict: Dict[Tuple[str, str, int], int] = {}
for v in self.prbl.vessels:
for i in self.prbl.nodes:
for t in self.prbl.time_periods:
y_init_dict[(v, i, t)] = 0
for v in self.prbl.vessels:
route = self.routes[v]
for idx in range(len(route)):
y_init_dict[(v, route[idx], self.l[v][idx])] = 1
return y_init_dict
def get_solution_hash(self) -> str:
relevant_solution_parts = [self.routes, self.factory_visits]
return joblib.hash(relevant_solution_parts)
def print_routes(self, highlight: List[Tuple[str, int]] = None):
highlight = [] if not highlight else highlight
error = False
for vessel, route in self.routes.items():
s = ''
for i, (node, e, l) in enumerate(zip(self.routes[vessel], self.e[vessel], self.l[vessel])):
if e > l:
s += f'{bcolors.FAIL}[ERROR]{bcolors.RESET_ALL} '
error = True
if (vessel, i) in highlight:
s += f'{bcolors.OKGREEN}{node} ({e},{l}){bcolors.RESET_ALL}, '
elif self.prbl.nodes[node].is_factory:
s += f'{bcolors.GREY}{node} ({e},{l}){bcolors.RESET_ALL}, '
else:
s += f'{node} ({e},{l}), '
print(f'{vessel}: {s}')
if error:
for factory in self.prbl.factory_nodes:
print(f"{factory}: {self.factory_visits[factory]}")
def print_temp_routes(self, highlight: List[Tuple[str, int]] = None):
highlight = [] if not highlight else highlight
for vessel, route in self.temp_routes.items():
s = ''
for i, (node, e, l) in enumerate(zip(self.temp_routes[vessel], self.temp_e[vessel], self.temp_l[vessel])):
if (vessel, i) in highlight:
s += f'{bcolors.OKGREEN}{node} ({e},{l}){bcolors.RESET_ALL}, '
elif i == len(self.temp_routes[vessel]) - 1 and self.prbl.nodes[node].is_factory:
s += f'{bcolors.GREY}{node} ({e},{l}){bcolors.RESET_ALL}'
else:
s += f'{node} ({e},{l}), '
print(f'{vessel}: {s}')
def print_factory_visits(self, highlight: List[Tuple[str, int]] = None):
highlight = [] if not highlight else highlight
for factory, visits in self.factory_visits.items():
s = ''
for vessel, route_idx in zip(visits, self.factory_visits_route_index[factory]):
if (vessel, route_idx) in highlight:
s += f'{bcolors.OKGREEN}{vessel} ({self.e[vessel][route_idx]}, {self.l[vessel][route_idx]})' \
f'{bcolors.RESET_ALL}, '
elif self._is_destination_factory(vessel, route_idx):
s += f'{bcolors.GREY}{vessel} ({self.e[vessel][route_idx]}, {self.l[vessel][route_idx]})' \
f'{bcolors.RESET_ALL}, '
else:
s += f'{vessel} ({self.e[vessel][route_idx]},{self.l[vessel][route_idx]}), '
print(f'{factory}: {s}')
def check_if_order_is_served_twice(self):
orders = set()
for vessel, route in self.routes.items():
for node_id in route:
if not self.prbl.nodes[node_id].is_factory:
if node_id in orders:
print(f"{node_id} appears multiple times!")
orders.add(node_id)
def check_insertion_feasibility_insert_and_print(self, insert_node_id: str, vessel: str, idx: int):
insert_node = self.prbl.nodes[insert_node_id]
print(
f"{bcolors.BOLD}Checking for insertion of node {insert_node_id} at index {idx} at route of vessel {vessel}{bcolors.ENDC}")
print("Utility of insertion:", self.get_insertion_utility(self.prbl.nodes[insert_node_id], vessel, idx))
feasibility = self.check_insertion_feasibility(insert_node_id, vessel, idx)
if not feasibility and self.debug:
print(f"{bcolors.FAIL}Infeasible insertion - node not inserted{bcolors.ENDC}")
print("> Final factory destination feasibility:",
self.check_final_factory_destination_feasibility(vessel, idx))
print("> Production feasibility:",
self.check_production_feasibility(vessel=vessel, idx=idx))
print("> Load feasibility:", self.check_load_feasibility(insert_node, vessel, idx))
print("> Number of products feasibility:", self.check_no_products_feasibility(insert_node, vessel, idx))
print("> Time feasibility:", self.check_time_feasibility(insert_node_id, vessel, idx))
self.clear_last_checked()
else:
print(f"{bcolors.OKGREEN}Insertion is feasible - inserting node{bcolors.ENDC}")
self.insert_last_checked()
print(vessel, self.routes[vessel])
print(list(zip(self.e[vessel], self.l[vessel])))
print()
return feasibility
if __name__ == '__main__':
problem = ProblemDataExtended('../../data/input_data/large_testcase.xlsx')
sol = Solution(problem)
insertions = [ # large testcase
('o_1', 'v_1', 1),
('f_1', 'v_1', 2),
('o_4', 'v_1', 2),
('o_2', 'v_1', 3),
('f_1', 'v_2', 1),
('o_1', 'v_2', 2),
('f_1', 'v_2', 3),
('o_9', 'v_3', 1),
('f_1', 'v_3', 2),
('o_6', 'v_3', 2),
('o_7', 'v_3', 2),
('o_8', 'v_3', 2),
]
for node, vessel, idx in insertions:
print(f'Inserting {node} into {vessel} at {idx}.')
if sol.check_insertion_feasibility(node, vessel, idx):
sol.insert_last_checked()
else:
sol.clear_last_checked()
sol.print_routes(highlight=[(vessel, idx)])
print()
sol.print_factory_visits(highlight=[(vessel, idx)])
print("\n\n")
# SMALL TESTCASE ONE VESSEL
# vessel='v_1'
# sol.check_insertion_feasibility_insert_and_print(insert_node_id='o_1', vessel=vessel, idx=1)
# sol.check_insertion_feasibility_insert_and_print(insert_node_id='o_2', vessel=vessel, idx=1)
# sol.check_insertion_feasibility_insert_and_print(insert_node_id='o_3', vessel=vessel, idx=1)
# sol.check_insertion_feasibility_insert_and_print(insert_node_id='f_1', vessel=vessel, idx=len(sol.routes[vessel]))
# MEDIUM TESTCASE
# vessel = 'v_1'
# print(f"INITIAL ROUTE, VESSEL {vessel}: {sol.routes[vessel]}")
# sol.check_insertion_feasibility_insert_and_print(insert_node_id='o_5', vessel=vessel, idx=1)
# sol.check_insertion_feasibility_insert_and_print(insert_node_id='o_4', vessel=vessel, idx=1)
# sol.check_insertion_feasibility_insert_and_print(insert_node_id='f_2', vessel=vessel, idx=len(sol.routes[vessel]))
# vessel = 'v_2'
# print(f"INITIAL ROUTE, VESSEL {vessel}: {sol.routes[vessel]}")
# sol.check_insertion_feasibility_insert_and_print(insert_node_id='o_3', vessel=vessel, idx=1)
# sol.check_insertion_feasibility_insert_and_print(insert_node_id='o_1', vessel=vessel, idx=1)
# sol.check_insertion_feasibility_insert_and_print(insert_node_id='o_6', vessel=vessel, idx=1)
# sol.check_insertion_feasibility_insert_and_print(insert_node_id='f_2', vessel=vessel, idx=len(sol.routes[vessel]))
# LARGE TESTCASE
# vessel = 'v_1'
# sol.check_insertion_feasibility_insert_and_print('o_6', vessel, 1)
# sol.check_insertion_feasibility_insert_and_print('o_4', vessel, 1)
# sol.check_insertion_feasibility_insert_and_print('f_1', vessel, len(sol.routes[vessel]))
# vessel = 'v_2'
# sol.check_insertion_feasibility_insert_and_print('o_9', vessel, 1)
# sol.check_insertion_feasibility_insert_and_print('o_11', vessel, 1)
# sol.check_insertion_feasibility_insert_and_print('f_2', vessel, len(sol.routes[vessel]))
# sol.check_insertion_feasibility_insert_and_print('o_3', vessel, 1)
# sol.check_insertion_feasibility_insert_and_print('o_5', vessel, 1)
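    # Illustrative follow-up queries (hypothetical; they only use helpers defined above):
    # after an insertion loop like the one at the top of this block, the solution can be
    # inspected for completeness and duplicates, and hashed for duplicate detection.
    # print("Orders not served:", sol.get_orders_not_served())
    # sol.check_if_order_is_served_twice()
    # print("Solution hash:", sol.get_solution_hash())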
|
the-stack_106_25077 | def fn():
x = []
if a % 2 == 1:
return "bad"
for i in range(len(b)):
if b[i] == c[i]:
return "bad"
if b[i] < c[i]:
x.append(b[i] + c[i])
else:
x.append(c[i] + b[i])
for i in x:
if x.count(i) != 2:
return "bad"
return "good"
a = int(input())
b = input().split()
c = input().split()
print(fn()) |
the-stack_106_25078 | import sys
import subprocess
import os
import collections
import re
def is_clang(command):
for word in command:
if '--compiler-bindir' in word and 'clang' in word:
return True
return False
def main():
try:
sys.argv.remove('--y_skip_nocxxinc')
skip_nocxxinc = True
except ValueError:
skip_nocxxinc = False
spl = sys.argv.index('--cflags')
command = sys.argv[1: spl]
cflags = sys.argv[spl + 1:]
dump_args = False
if '--y_dump_args' in command:
command.remove('--y_dump_args')
dump_args = True
executable = command[0]
if not os.path.exists(executable):
print >> sys.stderr, '{} not found'.format(executable)
sys.exit(1)
if is_clang(command):
cflags.append('-Wno-unused-parameter')
if not is_clang(command) and '-fopenmp=libomp' in cflags:
cflags.append('-fopenmp')
cflags.remove('-fopenmp=libomp')
skip_list = [
'-gline-tables-only',
# clang coverage
'-fprofile-instr-generate',
'-fcoverage-mapping',
'/Zc:inline', # disable unreferenced functions (kernel registrators) remove
'-Wno-c++17-extensions',
]
if skip_nocxxinc:
skip_list.append('-nostdinc++')
for flag in skip_list:
if flag in cflags:
cflags.remove(flag)
skip_prefix_list = [
'-fsanitize=',
'-fsanitize-coverage=',
'-fsanitize-blacklist=',
'--system-header-prefix',
]
for prefix in skip_prefix_list:
cflags = [i for i in cflags if not i.startswith(prefix)]
if not is_clang(command):
def good(arg):
if arg.startswith('--target='):
return False
if arg in ('-Wno-exceptions',
'-Wno-inconsistent-missing-override'):
return False
return True
cflags = filter(good, cflags)
cpp_args = []
compiler_args = []
# NVCC requires particular MSVC versions which may differ from the version
# used to compile regular C++ code. We have a separate MSVC in Arcadia for
    # the CUDA builds and pass its root in $Y_VC_Root.
    # The separate MSVC for CUDA may be absent in Yandex Open Source builds.
vc_root = os.environ.get('Y_VC_Root')
cflags_queue = collections.deque(cflags)
while cflags_queue:
arg = cflags_queue.popleft()
if arg[:2].upper() in ('-I', '/I', '-B'):
value = arg[2:]
if not value:
value = cflags_queue.popleft()
if arg[1] == 'I':
cpp_args.append('-I{}'.format(value))
            elif arg[1] == 'B':  # todo: delete the "B" flag check when CUDA stops using gcc
pass
continue
match = re.match(r'[-/]D(.*)', arg)
if match:
define = match.group(1)
# We have C++ flags configured for the regular C++ build.
# There is Y_MSVC_INCLUDE define with a path to the VC header files.
# We need to change the path accordingly when using a separate MSVC for CUDA.
if vc_root and define.startswith('Y_MSVC_INCLUDE'):
define = os.path.expandvars('Y_MSVC_INCLUDE={}/include'.format(vc_root))
cpp_args.append('-D' + define.replace('\\', '/'))
continue
compiler_args.append(arg)
command += cpp_args
if compiler_args:
command += ['--compiler-options', ','.join(compiler_args)]
if dump_args:
sys.stdout.write('\n'.join(command))
else:
sys.exit(subprocess.Popen(command, stdout=sys.stderr, stderr=sys.stderr).wait())
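# Usage note (summary of the logic above; no additional behaviour is implied):
# the script is invoked as
#     <this script> <nvcc> <nvcc args...> --cflags <host compiler flags...>
# Arguments before --cflags are passed to nvcc unchanged.  Of the flags after --cflags,
# -I/-D options are forwarded to nvcc as preprocessor arguments, while the remaining
# host-compiler flags are folded into a single "--compiler-options a,b,c" argument.
# Passing --y_dump_args prints the assembled command instead of executing it.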
if __name__ == '__main__':
main()
|
the-stack_106_25079 | # coding: utf-8
from __future__ import unicode_literals
import itertools
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
try_get,
)
CDN_API_BASE = 'https://cdn.younow.com/php/api'
MOMENT_URL_FORMAT = '%s/moment/fetch/id=%%s' % CDN_API_BASE
class YouNowLiveIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?younow\.com/(?P<id>[^/?#&]+)'
_TEST = {
'url': 'https://www.younow.com/AmandaPadeezy',
'info_dict': {
'id': 'AmandaPadeezy',
'ext': 'mp4',
'is_live': True,
'title': 'March 26, 2017',
'thumbnail': r're:^https?://.*\.jpg$',
'tags': ['girls'],
'categories': ['girls'],
'uploader': 'AmandaPadeezy',
'uploader_id': '6716501',
'uploader_url': 'https://www.younow.com/AmandaPadeezy',
'creator': 'AmandaPadeezy',
},
'skip': True,
}
@classmethod
def suitable(cls, url):
return (False
if YouNowChannelIE.suitable(url) or YouNowMomentIE.suitable(url)
else super(YouNowLiveIE, cls).suitable(url))
def _real_extract(self, url):
username = self._match_id(url)
data = self._download_json(
'https://api.younow.com/php/api/broadcast/info/curId=0/user=%s'
% username, username)
if data.get('errorCode') != 0:
raise ExtractorError(data['errorMsg'], expected=True)
uploader = try_get(
data, lambda x: x['user']['profileUrlString'],
compat_str) or username
return {
'id': uploader,
'is_live': True,
'title': self._live_title(uploader),
'thumbnail': data.get('awsUrl'),
'tags': data.get('tags'),
'categories': data.get('tags'),
'uploader': uploader,
'uploader_id': data.get('userId'),
'uploader_url': 'https://www.younow.com/%s' % username,
'creator': uploader,
'view_count': int_or_none(data.get('viewers')),
'like_count': int_or_none(data.get('likes')),
'formats': [{
'url': '%s/broadcast/videoPath/hls=1/broadcastId=%s/channelId=%s'
% (CDN_API_BASE, data['broadcastId'], data['userId']),
'ext': 'mp4',
'protocol': 'm3u8',
}],
}
def _extract_moment(item, fatal=True):
moment_id = item.get('momentId')
if not moment_id:
if not fatal:
return
raise ExtractorError('Unable to extract moment id')
moment_id = compat_str(moment_id)
title = item.get('text')
if not title:
title = 'YouNow %s' % (
item.get('momentType') or item.get('titleType') or 'moment')
uploader = try_get(item, lambda x: x['owner']['name'], compat_str)
uploader_id = try_get(item, lambda x: x['owner']['userId'])
uploader_url = 'https://www.younow.com/%s' % uploader if uploader else None
entry = {
'extractor_key': 'YouNowMoment',
'id': moment_id,
'title': title,
'view_count': int_or_none(item.get('views')),
'like_count': int_or_none(item.get('likes')),
'timestamp': int_or_none(item.get('created')),
'creator': uploader,
'uploader': uploader,
'uploader_id': uploader_id,
'uploader_url': uploader_url,
'formats': [{
'url': 'https://hls.younow.com/momentsplaylists/live/%s/%s.m3u8'
% (moment_id, moment_id),
'ext': 'mp4',
'protocol': 'm3u8_native',
}],
}
return entry
class YouNowChannelIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?younow\.com/(?P<id>[^/]+)/channel'
_TEST = {
'url': 'https://www.younow.com/its_Kateee_/channel',
'info_dict': {
'id': '14629760',
'title': 'its_Kateee_ moments'
},
'playlist_mincount': 8,
}
def _entries(self, username, channel_id):
created_before = 0
for page_num in itertools.count(1):
if created_before is None:
break
info = self._download_json(
'%s/moment/profile/channelId=%s/createdBefore=%d/records=20'
% (CDN_API_BASE, channel_id, created_before), username,
note='Downloading moments page %d' % page_num)
items = info.get('items')
if not items or not isinstance(items, list):
break
for item in items:
if not isinstance(item, dict):
continue
item_type = item.get('type')
if item_type == 'moment':
entry = _extract_moment(item, fatal=False)
if entry:
yield entry
elif item_type == 'collection':
moments = item.get('momentsIds')
if isinstance(moments, list):
for moment_id in moments:
m = self._download_json(
MOMENT_URL_FORMAT % moment_id, username,
note='Downloading %s moment JSON' % moment_id,
fatal=False)
if m and isinstance(m, dict) and m.get('item'):
entry = _extract_moment(m['item'])
if entry:
yield entry
created_before = int_or_none(item.get('created'))
def _real_extract(self, url):
username = self._match_id(url)
channel_id = compat_str(self._download_json(
'https://api.younow.com/php/api/broadcast/info/curId=0/user=%s'
% username, username, note='Downloading user information')['userId'])
return self.playlist_result(
self._entries(username, channel_id), channel_id,
'%s moments' % username)
class YouNowMomentIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?younow\.com/[^/]+/(?P<id>[^/?#&]+)'
_TEST = {
'url': 'https://www.younow.com/GABO.../20712117/36319236/3b316doc/m',
'md5': 'a30c70eadb9fb39a1aa3c8c0d22a0807',
'info_dict': {
'id': '20712117',
'ext': 'mp4',
'title': 'YouNow capture',
'view_count': int,
'like_count': int,
'timestamp': 1490432040,
'upload_date': '20170325',
'uploader': 'GABO...',
'uploader_id': 35917228,
},
}
@classmethod
def suitable(cls, url):
return (False
if YouNowChannelIE.suitable(url)
else super(YouNowMomentIE, cls).suitable(url))
def _real_extract(self, url):
video_id = self._match_id(url)
item = self._download_json(MOMENT_URL_FORMAT % video_id, video_id)
return _extract_moment(item['item'])
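# Note on URL dispatch (derived from the suitable() overrides above): the three extractors
# overlap on younow.com URLs, so precedence is channel > moment > live -- a /channel URL is
# claimed by YouNowChannelIE, any other multi-segment URL by YouNowMomentIE, and a bare
# profile URL falls through to YouNowLiveIE.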
|
the-stack_106_25080 | class Solution:
def judgeCircle(self, moves: str) -> bool:
x, y = 0, 0
for move in moves:
if move == "U":
y += 1
elif move == "D":
y -= 1
elif move == "L":
x -= 1
elif move == "R":
x += 1
return x == 0 and y == 0
if __name__ == "__main__":
print(Solution().judgeCircle("UD"))
print(Solution().judgeCircle("LL"))
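# Alternative sketch (an equivalent check, shown for comparison): counting the moves with
# collections.Counter avoids the explicit walk, e.g.
#     from collections import Counter
#     c = Counter(moves)
#     return c['U'] == c['D'] and c['L'] == c['R']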
|
the-stack_106_25081 | from django.shortcuts import render
from django.core.files.storage import default_storage
from rest_framework.response import Response
from rest_framework import generics, mixins
from rest_framework.parsers import MultiPartParser
from rest_framework.decorators import APIView
from rest_framework.permissions import IsAuthenticated
from dashboard.pagination import CustomPagination
from users.authentication import jwtAuthentication
from .serializers import ProductSerializer
from .models import Product
# Create your views here.
class ProductGenericAPIView(generics.GenericAPIView, mixins.ListModelMixin, mixins.RetrieveModelMixin, mixins.CreateModelMixin,
mixins.UpdateModelMixin, mixins.DestroyModelMixin):
authentication_classes = [jwtAuthentication]
permission_classes = [IsAuthenticated]
queryset = Product.objects.all()
serializer_class = ProductSerializer
pagination_class = CustomPagination
def get(self, request, pk = None):
if pk:
return Response({
'data': self.retrieve(request, pk).data
})
return self.list(request)
def post(self, request):
return Response({
            'data': self.create(request).data
})
def put(self, request, pk=None):
return Response({
'data': self.partial_update(request, pk).data
})
def delete(self, request, pk=None):
return self.destroy(request, pk)
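# Hypothetical routing sketch (illustration only; the project's urls.py is not part of this
# file and the paths below are placeholders):
#
#     from django.urls import path
#     from .views import ProductGenericAPIView, FileUploadView
#
#     urlpatterns = [
#         path('products/', ProductGenericAPIView.as_view()),
#         path('products/<str:pk>/', ProductGenericAPIView.as_view()),
#         path('upload/', FileUploadView.as_view()),   # FileUploadView is defined below
#     ]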
class FileUploadView(APIView):
authentication_classes = [jwtAuthentication]
permission_classes = [IsAuthenticated]
parser_classes = [MultiPartParser, ]
def post(self, request):
file = request.FILES['image']
file_name = default_storage.save(file.name, file)
url = default_storage.url(file_name)
return Response({
'url': 'http://localhost:8000/api' + url
}) |
the-stack_106_25082 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from airflow.hooks.docker_hook import DockerHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.file import TemporaryDirectory
from docker import APIClient, tls
import ast
class DockerOperator(BaseOperator):
"""
Execute a command inside a docker container.
A temporary directory is created on the host and
mounted into a container to allow storing files
that together exceed the default disk size of 10GB in a container.
The path to the mounted directory can be accessed
via the environment variable ``AIRFLOW_TMP_DIR``.
If a login to a private registry is required prior to pulling the image, a
Docker connection needs to be configured in Airflow and the connection ID
be provided with the parameter ``docker_conn_id``.
:param image: Docker image from which to create the container.
If image tag is omitted, "latest" will be used.
:type image: str
:param api_version: Remote API version. Set to ``auto`` to automatically
detect the server's version.
:type api_version: str
:param command: Command to be run in the container. (templated)
:type command: str or list
:param cpus: Number of CPUs to assign to the container.
This value gets multiplied with 1024. See
https://docs.docker.com/engine/reference/run/#cpu-share-constraint
:type cpus: float
:param dns: Docker custom DNS servers
:type dns: list of strings
:param dns_search: Docker custom DNS search domain
:type dns_search: list of strings
:param docker_url: URL of the host running the docker daemon.
Default is unix://var/run/docker.sock
:type docker_url: str
:param environment: Environment variables to set in the container. (templated)
:type environment: dict
:param force_pull: Pull the docker image on every run. Default is False.
:type force_pull: bool
:param mem_limit: Maximum amount of memory the container can use.
Either a float value, which represents the limit in bytes,
or a string like ``128m`` or ``1g``.
:type mem_limit: float or str
:param network_mode: Network mode for the container.
:type network_mode: str
:param tls_ca_cert: Path to a PEM-encoded certificate authority
to secure the docker connection.
:type tls_ca_cert: str
:param tls_client_cert: Path to the PEM-encoded certificate
used to authenticate docker client.
:type tls_client_cert: str
:param tls_client_key: Path to the PEM-encoded key used to authenticate docker client.
:type tls_client_key: str
:param tls_hostname: Hostname to match against
the docker server certificate or False to disable the check.
:type tls_hostname: str or bool
:param tls_ssl_version: Version of SSL to use when communicating with docker daemon.
:type tls_ssl_version: str
:param tmp_dir: Mount point inside the container to
a temporary directory created on the host by the operator.
The path is also made available via the environment variable
``AIRFLOW_TMP_DIR`` inside the container.
:type tmp_dir: str
:param user: Default user inside the docker container.
:type user: int or str
:param volumes: List of volumes to mount into the container, e.g.
``['/host/path:/container/path', '/host/path2:/container/path2:ro']``.
:param working_dir: Working directory to
set on the container (equivalent to the -w switch the docker client)
:type working_dir: str
    :param xcom_push: Whether the stdout will be pushed to the next step using XCom.
The default is False.
:type xcom_push: bool
:param xcom_all: Push all the stdout or just the last line.
The default is False (last line).
:type xcom_all: bool
:param docker_conn_id: ID of the Airflow connection to use
:type docker_conn_id: str
:param shm_size: Size of ``/dev/shm`` in bytes. The size must be
greater than 0. If omitted uses system default.
:type shm_size: int
"""
template_fields = ('command', 'environment',)
template_ext = ('.sh', '.bash',)
@apply_defaults
def __init__(
self,
image,
api_version=None,
command=None,
cpus=1.0,
docker_url='unix://var/run/docker.sock',
environment=None,
force_pull=False,
mem_limit=None,
network_mode=None,
tls_ca_cert=None,
tls_client_cert=None,
tls_client_key=None,
tls_hostname=None,
tls_ssl_version=None,
tmp_dir='/tmp/airflow',
user=None,
volumes=None,
working_dir=None,
xcom_push=False,
xcom_all=False,
docker_conn_id=None,
dns=None,
dns_search=None,
shm_size=None,
*args,
**kwargs):
super(DockerOperator, self).__init__(*args, **kwargs)
self.api_version = api_version
self.command = command
self.cpus = cpus
self.dns = dns
self.dns_search = dns_search
self.docker_url = docker_url
self.environment = environment or {}
self.force_pull = force_pull
self.image = image
self.mem_limit = mem_limit
self.network_mode = network_mode
self.tls_ca_cert = tls_ca_cert
self.tls_client_cert = tls_client_cert
self.tls_client_key = tls_client_key
self.tls_hostname = tls_hostname
self.tls_ssl_version = tls_ssl_version
self.tmp_dir = tmp_dir
self.user = user
self.volumes = volumes or []
self.working_dir = working_dir
self.xcom_push_flag = xcom_push
self.xcom_all = xcom_all
self.docker_conn_id = docker_conn_id
self.shm_size = shm_size
self.cli = None
self.container = None
def get_hook(self):
return DockerHook(
docker_conn_id=self.docker_conn_id,
base_url=self.docker_url,
version=self.api_version,
tls=self.__get_tls_config()
)
def execute(self, context):
self.log.info('Starting docker container from image %s', self.image)
tls_config = self.__get_tls_config()
if self.docker_conn_id:
self.cli = self.get_hook().get_conn()
else:
self.cli = APIClient(
base_url=self.docker_url,
version=self.api_version,
tls=tls_config
)
if self.force_pull or len(self.cli.images(name=self.image)) == 0:
self.log.info('Pulling docker image %s', self.image)
for l in self.cli.pull(self.image, stream=True):
output = json.loads(l.decode('utf-8').strip())
if 'status' in output:
self.log.info("%s", output['status'])
with TemporaryDirectory(prefix='airflowtmp') as host_tmp_dir:
self.environment['AIRFLOW_TMP_DIR'] = self.tmp_dir
self.volumes.append('{0}:{1}'.format(host_tmp_dir, self.tmp_dir))
self.container = self.cli.create_container(
command=self.get_command(),
environment=self.environment,
host_config=self.cli.create_host_config(
binds=self.volumes,
network_mode=self.network_mode,
shm_size=self.shm_size,
dns=self.dns,
dns_search=self.dns_search,
cpu_shares=int(round(self.cpus * 1024)),
mem_limit=self.mem_limit),
image=self.image,
user=self.user,
working_dir=self.working_dir
)
self.cli.start(self.container['Id'])
line = ''
for line in self.cli.logs(container=self.container['Id'], stream=True):
line = line.strip()
if hasattr(line, 'decode'):
line = line.decode('utf-8')
self.log.info(line)
result = self.cli.wait(self.container['Id'])
if result['StatusCode'] != 0:
raise AirflowException('docker container failed: ' + repr(result))
if self.xcom_push_flag:
return self.cli.logs(container=self.container['Id']) \
if self.xcom_all else str(line)
def get_command(self):
if self.command is not None and self.command.strip().find('[') == 0:
commands = ast.literal_eval(self.command)
else:
commands = self.command
return commands
def on_kill(self):
if self.cli is not None:
self.log.info('Stopping docker container')
self.cli.stop(self.container['Id'])
def __get_tls_config(self):
tls_config = None
if self.tls_ca_cert and self.tls_client_cert and self.tls_client_key:
tls_config = tls.TLSConfig(
ca_cert=self.tls_ca_cert,
client_cert=(self.tls_client_cert, self.tls_client_key),
verify=True,
ssl_version=self.tls_ssl_version,
assert_hostname=self.tls_hostname
)
self.docker_url = self.docker_url.replace('tcp://', 'https://')
return tls_config
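# Illustrative task definition (hypothetical values; the image, command and dag below are
# placeholders and not part of this module):
#
#     t1 = DockerOperator(
#         task_id='docker_command',
#         image='ubuntu:16.04',
#         api_version='auto',
#         command='/bin/sleep 30',
#         docker_url='unix://var/run/docker.sock',
#         network_mode='bridge',
#         xcom_push=True,   # push the last line of stdout to XCom
#         dag=dag,
#     )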
|
the-stack_106_25084 | from itertools import count
import numpy as np
def day25(inp):
floor = np.array([list(row) for row in inp.splitlines()])
shape = floor.shape
kind_codes = '>v'
deltas = np.array([(0, 1), (1, 0)])
poses = [np.array((floor == kind_code).nonzero()) for kind_code in kind_codes]
    # two arrays of shape (2, n1) and (2, n2), one per herd:
    # row 0 holds the row indices and row 1 the column indices of that herd's specimens
# convert 2d indices to linear indices for faster lookup later
poses = [np.ravel_multi_index(poses_now, shape) for poses_now in poses]
n_east = poses[0].size
all_poses = np.concatenate(poses)
poses = all_poses[:n_east], all_poses[n_east:]
# now both elements of poses are a view into the same underlying array
for i in count(1):
could_move = False
for kind, delta in enumerate(deltas):
poses_now = poses[kind]
            cucumbs = np.unravel_index(poses_now, shape)  # tuple of 2 index arrays, i.e. shape (2, n_herd)
next_poses = np.ravel_multi_index(cucumbs + delta[:, None], shape, mode='wrap') # shape (n_herd,) 1d indices
# find free cucumbers
free_poses_mask = ~np.in1d(next_poses, all_poses)
poses_now[free_poses_mask] = next_poses[free_poses_mask]
if free_poses_mask.any():
# this herd could move
could_move = True
if not could_move:
# we're done
return i
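# Implementation note (describes the function above): positions are stored as raveled
# (linear) indices so the occupancy test per herd is a single np.in1d against all_poses,
# and the toroidal wrap-around is handled by mode='wrap' when re-ravelling the shifted
# 2-D coordinates.  Because both herd arrays are views into all_poses, moves made by one
# herd are immediately visible when the other herd checks for free cells.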
if __name__ == "__main__":
testinp = open('day25.testinp').read()
print(day25(testinp))
inp = open('day25.inp').read()
print(day25(inp))
|
the-stack_106_25085 | """Utility functions, node construction macros, etc."""
# Author: Collin Winter
# Local imports
from .pgen2 import token
from .pytree import Leaf, Node
from .pygram import python_symbols as syms
from . import patcomp
###########################################################
### Common node-construction "macros"
###########################################################
def KeywordArg(keyword, value):
return Node(syms.argument,
[keyword, Leaf(token.EQUAL, "="), value])
def LParen():
return Leaf(token.LPAR, "(")
def RParen():
return Leaf(token.RPAR, ")")
def Assign(target, source):
"""Build an assignment statement"""
if not isinstance(target, list):
target = [target]
if not isinstance(source, list):
source.prefix = " "
source = [source]
return Node(syms.atom,
target + [Leaf(token.EQUAL, "=", prefix=" ")] + source)
def Name(name, prefix=None):
"""Return a NAME leaf"""
return Leaf(token.NAME, name, prefix=prefix)
def Attr(obj, attr):
"""A node tuple for obj.attr"""
return [obj, Node(syms.trailer, [Dot(), attr])]
def Comma():
"""A comma leaf"""
return Leaf(token.COMMA, ",")
def Dot():
"""A period (.) leaf"""
return Leaf(token.DOT, ".")
def ArgList(args, lparen=LParen(), rparen=RParen()):
"""A parenthesised argument list, used by Call()"""
node = Node(syms.trailer, [lparen.clone(), rparen.clone()])
if args:
node.insert_child(1, Node(syms.arglist, args))
return node
def Call(func_name, args=None, prefix=None):
"""A function call"""
node = Node(syms.power, [func_name, ArgList(args)])
if prefix is not None:
node.prefix = prefix
return node
def Newline():
"""A newline literal"""
return Leaf(token.NEWLINE, "\n")
def BlankLine():
"""A blank line"""
return Leaf(token.NEWLINE, "")
def Number(n, prefix=None):
return Leaf(token.NUMBER, n, prefix=prefix)
def Subscript(index_node):
"""A numeric or string subscript"""
return Node(syms.trailer, [Leaf(token.LBRACE, "["),
index_node,
Leaf(token.RBRACE, "]")])
def String(string, prefix=None):
"""A string leaf"""
return Leaf(token.STRING, string, prefix=prefix)
def ListComp(xp, fp, it, test=None):
"""A list comprehension of the form [xp for fp in it if test].
If test is None, the "if test" part is omitted.
"""
xp.prefix = ""
fp.prefix = " "
it.prefix = " "
for_leaf = Leaf(token.NAME, "for")
for_leaf.prefix = " "
in_leaf = Leaf(token.NAME, "in")
in_leaf.prefix = " "
inner_args = [for_leaf, fp, in_leaf, it]
if test:
test.prefix = " "
if_leaf = Leaf(token.NAME, "if")
if_leaf.prefix = " "
inner_args.append(Node(syms.comp_if, [if_leaf, test]))
inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
return Node(syms.atom,
[Leaf(token.LBRACE, "["),
inner,
Leaf(token.RBRACE, "]")])
def FromImport(package_name, name_leafs):
""" Return an import statement in the form:
from package import name_leafs"""
# XXX: May not handle dotted imports properly (eg, package_name='foo.bar')
#assert package_name == '.' or '.' not in package_name, "FromImport has "\
# "not been tested with dotted package names -- use at your own "\
# "peril!"
for leaf in name_leafs:
# Pull the leaves out of their old tree
leaf.remove()
children = [Leaf(token.NAME, "from"),
Leaf(token.NAME, package_name, prefix=" "),
Leaf(token.NAME, "import", prefix=" "),
Node(syms.import_as_names, name_leafs)]
imp = Node(syms.import_from, children)
return imp
def ImportAndCall(node, results, names):
"""Returns an import statement and calls a method
of the module:
import module
module.name()"""
obj = results["obj"].clone()
if obj.type == syms.arglist:
newarglist = obj.clone()
else:
newarglist = Node(syms.arglist, [obj.clone()])
after = results["after"]
if after:
after = [n.clone() for n in after]
new = Node(syms.power,
Attr(Name(names[0]), Name(names[1])) +
[Node(syms.trailer,
[results["lpar"].clone(),
newarglist,
results["rpar"].clone()])] + after)
new.prefix = node.prefix
return new
###########################################################
### Determine whether a node represents a given literal
###########################################################
def is_tuple(node):
"""Does the node represent a tuple literal?"""
if isinstance(node, Node) and node.children == [LParen(), RParen()]:
return True
return (isinstance(node, Node)
and len(node.children) == 3
and isinstance(node.children[0], Leaf)
and isinstance(node.children[1], Node)
and isinstance(node.children[2], Leaf)
and node.children[0].value == "("
and node.children[2].value == ")")
def is_list(node):
"""Does the node represent a list literal?"""
return (isinstance(node, Node)
and len(node.children) > 1
and isinstance(node.children[0], Leaf)
and isinstance(node.children[-1], Leaf)
and node.children[0].value == "["
and node.children[-1].value == "]")
###########################################################
### Misc
###########################################################
def parenthesize(node):
return Node(syms.atom, [LParen(), node, RParen()])
consuming_calls = {"sorted", "list", "set", "any", "all", "tuple", "sum",
"min", "max", "enumerate"}
def attr_chain(obj, attr):
"""Follow an attribute chain.
If you have a chain of objects where a.foo -> b, b.foo-> c, etc,
use this to iterate over all objects in the chain. Iteration is
terminated by getattr(x, attr) is None.
Args:
obj: the starting object
attr: the name of the chaining attribute
Yields:
Each successive object in the chain.
"""
next = getattr(obj, attr)
while next:
yield next
next = getattr(next, attr)
p0 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
"""
p1 = """
power<
( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
'any' | 'all' | 'enumerate' | (any* trailer< '.' 'join' >) )
trailer< '(' node=any ')' >
any*
>
"""
p2 = """
power<
( 'sorted' | 'enumerate' )
trailer< '(' arglist<node=any any*> ')' >
any*
>
"""
pats_built = False
def in_special_context(node):
""" Returns true if node is in an environment where all that is required
of it is being iterable (ie, it doesn't matter if it returns a list
or an iterator).
See test_map_nochange in test_fixers.py for some examples and tests.
"""
global p0, p1, p2, pats_built
if not pats_built:
p0 = patcomp.compile_pattern(p0)
p1 = patcomp.compile_pattern(p1)
p2 = patcomp.compile_pattern(p2)
pats_built = True
patterns = [p0, p1, p2]
for pattern, parent in zip(patterns, attr_chain(node, "parent")):
results = {}
if pattern.match(parent, results) and results["node"] is node:
return True
return False
def is_probably_builtin(node):
"""
Check that something isn't an attribute or function name etc.
"""
prev = node.prev_sibling
if prev is not None and prev.type == token.DOT:
# Attribute lookup.
return False
parent = node.parent
if parent.type in (syms.funcdef, syms.classdef):
return False
if parent.type == syms.expr_stmt and parent.children[0] is node:
# Assignment.
return False
if parent.type == syms.parameters or \
(parent.type == syms.typedargslist and (
(prev is not None and prev.type == token.COMMA) or
parent.children[0] is node
)):
# The name of an argument.
return False
return True
def find_indentation(node):
"""Find the indentation of *node*."""
while node is not None:
if node.type == syms.suite and len(node.children) > 2:
indent = node.children[1]
if indent.type == token.INDENT:
return indent.value
node = node.parent
return ""
###########################################################
### The following functions are to find bindings in a suite
###########################################################
def make_suite(node):
if node.type == syms.suite:
return node
node = node.clone()
parent, node.parent = node.parent, None
suite = Node(syms.suite, [node])
suite.parent = parent
return suite
def find_root(node):
"""Find the top level namespace."""
# Scamper up to the top level namespace
while node.type != syms.file_input:
node = node.parent
if not node:
raise ValueError("root found before file_input node was found.")
return node
def does_tree_import(package, name, node):
""" Returns true if name is imported from package at the
top level of the tree which node belongs to.
To cover the case of an import like 'import foo', use
None for the package and 'foo' for the name. """
binding = find_binding(name, find_root(node), package)
return bool(binding)
def is_import(node):
"""Returns true if the node is an import statement."""
return node.type in (syms.import_name, syms.import_from)
def touch_import(package, name, node):
""" Works like `does_tree_import` but adds an import statement
if it was not imported. """
def is_import_stmt(node):
return (node.type == syms.simple_stmt and node.children and
is_import(node.children[0]))
root = find_root(node)
if does_tree_import(package, name, root):
return
# figure out where to insert the new import. First try to find
# the first import and then skip to the last one.
insert_pos = offset = 0
for idx, node in enumerate(root.children):
if not is_import_stmt(node):
continue
for offset, node2 in enumerate(root.children[idx:]):
if not is_import_stmt(node2):
break
insert_pos = idx + offset
break
# if there are no imports where we can insert, find the docstring.
# if that also fails, we stick to the beginning of the file
if insert_pos == 0:
for idx, node in enumerate(root.children):
if (node.type == syms.simple_stmt and node.children and
node.children[0].type == token.STRING):
insert_pos = idx + 1
break
if package is None:
import_ = Node(syms.import_name, [
Leaf(token.NAME, "import"),
Leaf(token.NAME, name, prefix=" ")
])
else:
import_ = FromImport(package, [Leaf(token.NAME, name, prefix=" ")])
children = [import_, Newline()]
root.insert_child(insert_pos, Node(syms.simple_stmt, children))
_def_syms = {syms.classdef, syms.funcdef}
def find_binding(name, node, package=None):
""" Returns the node which binds variable name, otherwise None.
If optional argument package is supplied, only imports will
be returned.
See test cases for examples."""
for child in node.children:
ret = None
if child.type == syms.for_stmt:
if _find(name, child.children[1]):
return child
n = find_binding(name, make_suite(child.children[-1]), package)
if n: ret = n
elif child.type in (syms.if_stmt, syms.while_stmt):
n = find_binding(name, make_suite(child.children[-1]), package)
if n: ret = n
elif child.type == syms.try_stmt:
n = find_binding(name, make_suite(child.children[2]), package)
if n:
ret = n
else:
for i, kid in enumerate(child.children[3:]):
if kid.type == token.COLON and kid.value == ":":
# i+3 is the colon, i+4 is the suite
n = find_binding(name, make_suite(child.children[i+4]), package)
if n: ret = n
elif child.type in _def_syms and child.children[1].value == name:
ret = child
elif _is_import_binding(child, name, package):
ret = child
elif child.type == syms.simple_stmt:
ret = find_binding(name, child, package)
elif child.type == syms.expr_stmt:
if _find(name, child.children[0]):
ret = child
if ret:
if not package:
return ret
if is_import(ret):
return ret
return None
_block_syms = {syms.funcdef, syms.classdef, syms.trailer}
def _find(name, node):
nodes = [node]
while nodes:
node = nodes.pop()
if node.type > 256 and node.type not in _block_syms:
nodes.extend(node.children)
elif node.type == token.NAME and node.value == name:
return node
return None
def _is_import_binding(node, name, package=None):
""" Will return node if node will import name, or node
will import * from package. None is returned otherwise.
See test cases for examples. """
if node.type == syms.import_name and not package:
imp = node.children[1]
if imp.type == syms.dotted_as_names:
for child in imp.children:
if child.type == syms.dotted_as_name:
if child.children[2].value == name:
return node
elif child.type == token.NAME and child.value == name:
return node
elif imp.type == syms.dotted_as_name:
last = imp.children[-1]
if last.type == token.NAME and last.value == name:
return node
elif imp.type == token.NAME and imp.value == name:
return node
elif node.type == syms.import_from:
# str(...) is used to make life easier here, because
# from a.b import parses to ['import', ['a', '.', 'b'], ...]
if package and str(node.children[1]).strip() != package:
return None
n = node.children[3]
if package and _find("as", n):
# See test_from_import_as for explanation
return None
elif n.type == syms.import_as_names and _find(name, n):
return node
elif n.type == syms.import_as_name:
child = n.children[2]
if child.type == token.NAME and child.value == name:
return node
elif n.type == token.NAME and n.value == name:
return node
elif package and n.type == token.STAR:
return node
return None
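# Illustrative combination of the helpers above (hypothetical snippet; "obj", "method" and
# "arg" are placeholder names).  A call node for ``obj.method(arg)`` can be assembled from
# the macros and rendered back to source with str():
#
#     node = Node(syms.power,
#                 Attr(Name("obj"), Name("method")) + [ArgList([Name("arg")])])
#     str(node)   # -> "obj.method(arg)" (whitespace is controlled via the prefixes)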
|
the-stack_106_25087 | import angr
import logging
l = logging.getLogger(name=__name__)
class atol(angr.SimProcedure):
#pylint:disable=arguments-differ
def run(self, s):
strtol = angr.SIM_PROCEDURES['libc']['strtol']
return strtol.strtol_inner(s, self.state, self.state.memory, 10, True)[1]
|
the-stack_106_25088 | from collections import deque
from collections import namedtuple
import numpy as np
import cv2
from ..engine.base_service import BaseService
from ..engine.adapters import get as get_adapter
from ..utils.generic_utils import log
from ..utils.output_utils import draw_objects_np
from ..utils.output_utils import label_to_color_image
from ..utils.output_utils import draw_tracking_sparse
class EmptyService(BaseService):
"""
A service that does nothing and just passes the first input through
itself. Used for testing other modules and services.
"""
def __init__(self, service, *args, **kwargs):
super().__init__(
adapter=get_adapter('simple')([service]),
*args, **kwargs)
def _generator(self):
while True:
yield self._get_inputs(0)
class PerformanceBar(BaseService):
"""Writes performance statistics info on a bar on top of received image.
    The following statistics are calculated and displayed:
    * FPS (Frames Per Second)
    * Delay (the time difference between when a frame was first received
      and when it was last processed)
    # Arguments
        service: a BaseService object that returns a 3-channel image.
        n_frames: how many frames should be taken into account when the
            performance is calculated. If set to 0, all frames are used.
    """
BacklogItem = namedtuple('BacklogItem', 'timestamp delay')
def __init__(self, service, n_frames=200, *args, **kwargs):
self.n_frames = n_frames
super().__init__(
adapter=get_adapter('simple')({'service': service}),
*args, **kwargs)
def _generator(self):
total_delay = 0.
frame_counter = 0
start_time = None
if self.n_frames > 0:
performance_log = deque()
while True:
# After `_get_inputs`, we will have a full history tape of this
# package for analyze (the last entry will be this class).
image = self._get_inputs('service')
assert isinstance(image, np.ndarray) and len(image.shape) == 3 \
and image.shape[2] == 3 and image.dtype == np.uint8
# Delay between the timestamp of first introduction of the
# frame and the timestamp of when it's processed the last
delay = self._history_tape[-1].timestamp - \
self._history_tape[0].timestamp
# Accumulating statistics
if self.n_frames > 0:
performance_log.append(PerformanceBar.BacklogItem(
self._history_tape[-1].timestamp, delay))
if len(performance_log) > self.n_frames:
el = performance_log.popleft()
total_delay -= el.delay
if len(performance_log) == 1:
continue
else:
frame_counter += 1
if start_time is None:
start_time = self._history_tape[-1].timestamp
continue
total_delay += delay
# Calculating performance statistics
if self.n_frames > 0:
lst = list(performance_log)
avg_fps = len(performance_log) / (
lst[-1].timestamp - lst[0].timestamp + 1e-6)
avg_delay = total_delay / len(performance_log)
else:
now = self._history_tape[-1].timestamp
avg_fps = float(frame_counter) / (now - start_time)
avg_delay = total_delay / frame_counter
# Draw a status bar on top of the image
tx_color = (255, 255, 255)
bg_color = (0, 0, 0)
thickness = 1
padding = (5, 5)
font_scale = 0.6
font = cv2.FONT_HERSHEY_DUPLEX
text = 'FPS: {:.1f} | Delay: {:.1f}ms'.format(
avg_fps, avg_delay * 1000.)
(tw, th), baseline = cv2.getTextSize(
text, font, fontScale=font_scale, thickness=thickness)
if tw > image.shape[1]:
log.warning(
'Status bar is too wide for image ({} > {})'.format(
tw, image.shape[1]))
xmin, ymax = padding[0], th + 2 * padding[1]
display_img = np.empty(
shape=(
image.shape[0] + th + 2 * padding[1],
image.shape[1], 3),
dtype=np.uint8)
display_img[:th + 2 * padding[1], :, :] = np.array(bg_color)
display_img[th + 2 * padding[1]:, :, :] = image
cv2.putText(display_img, text,
(xmin + padding[0], ymax - padding[1]),
font, fontScale=font_scale,
color=tx_color, thickness=thickness,
lineType=cv2.LINE_AA)
# Return the image with status bar on top
yield display_img
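# Note on the statistics computed above (rolling window of n frames):
#     avg_fps   = n / (t_last - t_first)                # newest / oldest window timestamps
#     avg_delay = (sum of per-frame delays in window) / n
# where a frame's delay is the gap between the first and last timestamps on its history
# tape.  With n_frames == 0 the same quantities are accumulated over the whole run.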
class DetectronDraw(BaseService):
"""
Aggregation and drawing of various vision services. Supported services
and required output formats:
- Object Detectors. Should output a tuple of two elements: a list of
`DetectionObject`, defined in `utils/output_utils.py`, and the
labels (a list of names for each id).
- Image Segmentators. Should output a color map together with labels
(a dict of names for each color).
# Arguments:
image_stream: the service that feeds images to all computer vision
services in the pipeline. Usually it's the camera service.
detector: (optional) object detection service, output format of it
should follow the format described above.
segmentator: (optional) semantic segmentation service, its output
format should follow the format described above.
"""
def __init__(self,
image_stream,
detector=None,
segmentator=None,
tracker=None,
contours=None):
self._has_detector = False
self._has_segmentator = False
self._has_tracker = False
self._has_contours = False
input_services = {'image_stream': image_stream}
if detector is not None:
self._has_detector = True
input_services['detector'] = detector
if segmentator is not None:
self._has_segmentator = True
input_services['segmentator'] = segmentator
if tracker is not None:
self._has_tracker = True
input_services['tracker'] = tracker
if contours is not None:
self._has_contours = True
input_services['contours'] = contours
super().__init__(adapter=get_adapter('sync')(input_services))
def _generator(self):
while True:
ret_val = self._get_inputs(
'image_stream', 'detector', 'segmentator', 'tracker',
'contours')
image, detections, segmentation, tracking, contours = ret_val
image = self._safe_resolve_input(image, readonly=False)
if image is None:
log.warning(
'`image_stream` yielded None (expected behavior), '
'continue.')
continue
assert isinstance(image, np.ndarray) and len(image.shape) == 3 \
and image.shape[2] == 3 and image.dtype == np.uint8
if self._has_segmentator and segmentation is not None:
assert isinstance(segmentation, np.ndarray) and \
len(segmentation.shape) == 2
vis_res = label_to_color_image(
segmentation.astype(np.int)).astype(np.uint8)
if vis_res.shape != image.shape:
vis_res = cv2.resize(
vis_res, (image.shape[1], image.shape[0]))
image = 2 * (vis_res // 3) + image // 3
if self._has_detector and detections is not None:
objects, labels = detections
draw_objects_np(image, objects, labels)
if self._has_tracker and tracking is not None:
prev_points, cur_points = tracking
draw_tracking_sparse(image, prev_points, cur_points)
if self._has_contours and contours is not None:
cv2.drawContours(image, contours, -1, (0, 255, 255), 2)
yield image
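# Illustrative wiring (hypothetical; CameraService and ObjectDetectorService are
# placeholders for services not defined in this module):
#
#     camera = CameraService(...)             # any BaseService yielding HxWx3 uint8 frames
#     detector = ObjectDetectorService(...)   # yields ([DetectionObject, ...], labels)
#     draw = DetectronDraw(image_stream=camera, detector=detector)
#     display = PerformanceBar(draw, n_frames=200)
#
# DetectronDraw synchronises its inputs and overlays detections, segmentation, tracking
# and contours on the frame; PerformanceBar then adds the FPS/delay status bar on top.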
|
the-stack_106_25092 | from . import Bing, Google
import argparse
def main():
parser = argparse.ArgumentParser(
description="Scrape images from the internet.")
parser.add_argument(
"engine", help="Which search engine should be used? (Bing/Google)")
parser.add_argument(
"query", help="Query that should be used to scrape images.")
parser.add_argument(
"--limit", help="Amount of images to be scraped.", default=1000, required=False)
parser.add_argument("--json", help="Should image metadata be downloaded?",
action='store_true', required=False)
parser.add_argument(
"--url", help="Google: Scrape images from a google image search link", required=False) # Google Specific
parser.add_argument("--adult-filter", help="Disable adult filter",
action='store_true', required=False) # Bing Specific
args = parser.parse_args()
if str(args.engine).lower() == 'google':
search_engine = Google.Engine()
elif str(args.engine).lower() == 'bing':
search_engine = Bing.Engine()
results = search_engine.search_images(
query=args.query, delta=int(args.limit), adult_content=args.adult_filter)
if __name__=="__main__":
main() |
the-stack_106_25094 | from __future__ import print_function
import __main__ as main
import os
os.environ['https_proxy'] = 'https://localhost:1087'
import fcntl
import pandas as pd
import mysql.connector
import numpy as np
import tensorflow as tf
from pstk import data as dat
from sqlalchemy import create_engine
from joblib import Parallel, delayed
from pstk.model.wavenet import time_to_batch
from google.cloud import storage as gcs
from corl.wc_data import input_file2
import re
def testGetFileName():
print(main.__file__)
def testEmbedding():
sess = tf.compat.v1.InteractiveSession()
word_embeddings = tf.compat.v1.get_variable(
"word_embeddings", [5, 5], initializer=tf.compat.v1.truncated_normal_initializer())
sess.run(tf.compat.v1.global_variables_initializer())
word_ids = [0, 0, 1, 2]
embedded_word_ids = tf.nn.embedding_lookup(params=word_embeddings, ids=word_ids)
r = embedded_word_ids.eval()
print("{}".format(r))
print(r.shape)
def getIndices(x, n):
print("x:{}".format(x.get_shape()))
indices = tf.stack([tf.fill([n], x[0]), [
x[1]-n+i for i in range(n)]], axis=1)
print(indices.get_shape())
return indices
def testGatherND():
# gather last 2 elements of (2, 5, 2), -> (2, 2, 2)
# indices = np.asarray([[[0, 1], [0, 2]], [[1, 0], [1, 1]]])
params = np.asarray([[['a0', 'b0'], ['c0', 'd0'], ['e0', 'f0'], ['g0', 'h0'], ['i0', 'j0']],
[['a1', 'b1'], ['c1', 'd1'], ['e1', 'f1'], ['g1', 'h1'], ['0', '0']]])
batch = 2
n = 2
length = tf.compat.v1.placeholder(tf.int32, shape=[None])
mapinput = tf.stack([tf.range(batch), length], axis=1)
print("mapinput: {}".format(mapinput.get_shape()))
indices = tf.map_fn(lambda x: getIndices(
x, n), mapinput)
# [tf.stack([tf.constant(b, shape=[batch]), [
# s-n+i for i in range(n)]], axis=1) for b, s in enumerate(length)]
sess = tf.compat.v1.InteractiveSession()
gnd = tf.gather_nd(params, indices)
i, r = sess.run([indices, gnd], feed_dict={length: [5, 4]})
print(i)
print(params.shape)
print(r.shape)
print("{}".format(r))
def testTensorShape():
x = tf.compat.v1.placeholder(shape=[None, 16], dtype=tf.float32)
d = tf.compat.v1.placeholder(shape=[], dtype=tf.float32)
random_tensor = tf.random.uniform(tf.shape(input=x), dtype=tf.float32)
print("random_tensor: {}".format(random_tensor.get_shape()))
kept_idx = tf.greater_equal(random_tensor, 1.0 - d)
print("kept_idx: {}".format(kept_idx.get_shape()))
def delayedFunc(i):
return i+1, i*2, i**2
def testJoblib():
r = Parallel(n_jobs=5)(delayed(delayedFunc)(i) for i in range(30))
r1, r2, r3 = zip(*r)
print("r1 ({}):{}".format(type(list(r1)), r1))
print("r2:{}".format(r2))
print("r3:{}".format(r3))
def testVariableScope():
a = 3
if 1 < 3:
a = a+1
else:
a = a-1
print(a)
def testTimeToBatch():
inputs = tf.constant([[['a0', 'b0'], ['c0', 'd0'], ['e0', 'f0'], ['g0', 'h0'], ['i0', 'j0']],
[['a1', 'b1'], ['c1', 'd1'], ['e1', 'f1'], ['g1', 'h1'], ['0', '0']]])
print(inputs.get_shape())
ttb = time_to_batch(inputs, 2)
print(ttb.get_shape())
sess = tf.compat.v1.InteractiveSession()
r = sess.run([ttb])
print(r)
def testConv1d():
# inputs = tf.constant([[[1, 0, 1],
# [0, 1, 0],
# [1, 0, 1],
# [1, 1, 1],
# [1, 1, 1]]], dtype=tf.float32)
inputs = tf.constant([[[1],
[2],
[3],
[4],
[5]]], dtype=tf.float32)
# kernel = tf.constant([[[6]],[[7]]], dtype=tf.float32)
print("shape:{}".format(inputs.get_shape()))
# c = tf.nn.conv1d(inputs, kernel, stride=1, padding='VALID')
c = tf.compat.v1.layers.conv1d(inputs, filters=1, kernel_size=2, strides=1,
padding='VALID', use_bias=False)
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run([c])
print(out)
def testInversePerm():
x = tf.constant(
[[3, 2, 1, 0], [2, 3, 0, 1]],
dtype=tf.int32)
with tf.compat.v1.Session() as sess:
print(sess.run([tf.math.invert_permutation(x)]))
def testNestFlatten():
x = tf.constant(
[[3, 2, 1, 0], [2, 3, 0, 1]],
dtype=tf.int32)
with tf.compat.v1.Session() as sess:
print(sess.run([tf.nest.flatten(x)]))
def testReduceProdCumprod():
x_h = tf.compat.v1.placeholder(tf.float32, [None, 2, 4])
x = np.array(
[[[3, 2, 1, 4], [2, 3, 4, 1]],
[[4, 5, 6, 3], [5, 2, 1, 7]],
[[5, 7, 8, 9], [6, 7, 3, 3]]],
float
)
rp = tf.reduce_prod(input_tensor=x, axis=[1])
cp = tf.math.cumprod(x, 1, reverse=True)
size = tf.shape(input=cp)[0]
p1 = tf.range(tf.cast(size, tf.float32), dtype=tf.float32)
p2 = tf.zeros([size], tf.float32)
print("p1:{} p2:{}".format(p1.get_shape(), p2.get_shape()))
mr = list(tf.map_fn(lambda p: (
p[0], p[1]), (p1, p2), dtype=(tf.float32, tf.float32)))
# print("map shape:{}".format(mr.get_shape()))
indices = tf.stack(mr, 1)
print(indices.get_shape())
gcp = tf.gather_nd(cp, tf.cast(indices, tf.int32))
with tf.compat.v1.Session() as sess:
r1, r2, r3, idx = sess.run([rp, cp, gcp, indices], feed_dict={x_h: x})
print("result of reduce_prod:\n{}".format(r1))
print("result of cumprod:\n{}".format(r2))
print("result of gathered cumprod:\n{}".format(r3))
print("indices:\n{}".format(idx))
def testCosDecay():
LEARNING_RATE = 1e-3
LEARNING_RATE_ALPHA = 0.1
LR_DECAY_STEPS = 10
step = tf.compat.v1.placeholder(tf.int32, [])
dlr = tf.compat.v1.train.cosine_decay_restarts(
learning_rate=LEARNING_RATE,
global_step=step,
first_decay_steps=LR_DECAY_STEPS,
t_mul=1.0,
m_mul=1.0,
alpha=LEARNING_RATE_ALPHA
)
with tf.compat.v1.Session() as sess:
for i in range(100):
print(sess.run([dlr], feed_dict={step: i+1000}))
def testFoldl():
x_h = tf.compat.v1.placeholder(tf.int32, [None, 5])
x = np.array(
[[3, 4, 0, 2, 1],
[2, 4, 3, 0, 1]]
)
fd = tf.foldl(
lambda a, b: tf.stack(a, tf.math.invert_permutation(b)), x_h)
with tf.compat.v1.Session() as sess:
r = sess.run(fd, feed_dict={x_h: x})
print(r)
def invert_permutation():
x_h = tf.compat.v1.placeholder(tf.float32, [None, 5])
x = np.array(
[[3, 4, 0, 2, 1],
[2, 1, 3, 4, 0]],
float
)
dim = int(x_h.get_shape()[-1])
size = tf.cast(tf.shape(input=x_h)[0], tf.float32)
delta = tf.cast(tf.shape(input=x_h)[-1], tf.float32)
rg = tf.range(0, size*delta, delta, dtype=tf.float32)
rg = tf.reshape(rg, [-1, 1])
rg = tf.tile(rg, [1, dim])
x_a = tf.add(x_h, rg)
flat = tf.reshape(x_a, [-1])
iperm = tf.math.invert_permutation(tf.cast(flat, tf.int32))
rs = tf.reshape(iperm, [-1, dim])
rs_f = tf.subtract(rs, tf.cast(rg, tf.int32))
with tf.compat.v1.Session() as sess:
r_rg = sess.run(rg, feed_dict={x_h: x})
print("rg:{}".format(r_rg))
r = sess.run(flat, feed_dict={x_h: x})
print(r)
r_rs = sess.run(rs_f, feed_dict={x_h: x})
print("final:\n{}".format(r_rs))
check = sess.run(tf.math.invert_permutation([2, 1, 3, 4, 0]))
print("check:\n{}".format(check))
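# Note on the batched inverse-permutation trick above: tf.math.invert_permutation only
# accepts a 1-D permutation, so each row r is offset by r*dim, the rows are flattened into
# one permutation of [0, batch*dim), inverted in a single call, reshaped back to
# (batch, dim), and the offsets are subtracted again -- giving row-wise inverses without
# a Python loop.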
def batch_gatcher():
values = tf.constant(
[[1, 2, 3, 4],
[5, 6, 7, 8]]
)
indices = tf.constant(
[[2, 3, 0, 1],
[3, 1, 2, 0]]
)
idxf = tf.cast(indices, tf.float32)
size = tf.shape(input=indices)[0]
rg = tf.range(tf.cast(size, tf.float32), dtype=tf.float32)
rg = tf.expand_dims(rg, -1)
rg = tf.tile(rg, [1, int(indices.get_shape()[-1])])
rg = tf.expand_dims(rg, -1)
print("rg:{}".format(rg.get_shape()))
idxf = tf.expand_dims(idxf, -1)
print("idxf: {}".format(idxf.get_shape()))
gidx = tf.concat([rg, idxf], -1)
gidx = tf.cast(gidx, tf.int32)
# target gidx: (2,2,2)
# [[[0, 2], [0, 3], [0, 0], [0, 1]],
# [[1, 3], [1, 1], [1, 2], [1, 0]]]
# target output:
# [[3 4 1 2]
# [8 6 7 5]]
gn = tf.gather_nd(values, gidx)
with tf.compat.v1.Session() as sess:
r_rg, ridx, r = sess.run([rg, gidx, gn])
print("r_rg:\n{}".format(r_rg))
print("ridx:\n{}".format(ridx))
print("r:\n{}".format(r))
def dynamicShape():
x_h = tf.compat.v1.placeholder(tf.int32, [])
x_p = tf.compat.v1.placeholder(tf.int32, [None])
x_p.set_shape(tf.TensorShape([x_h]))
def reshape():
c = tf.constant(
[[2, 3, 4, 1],
[3, 7, 5, 2]]
)
c = tf.reduce_prod(input_tensor=c)
c1 = tf.reshape(c, [1])
c2 = [tf.reduce_prod(input_tensor=c)]
with tf.compat.v1.Session() as sess:
out = sess.run([c1, c2])
print(out[0])
print(out[1])
def regex():
p = re.compile('((?!while/).)*(conv2d|Conv|MatMul)')
print(p.match('this/should/match/MatMul123/asdf'))
print(p.match('while/this/should/not/match/MatMul123/asdf'))
print(p.match('the/middle/while/should/not/match/MatMul123/asdf'))
print(p.match('RNN/rnn/while/dnc/lstm/MatMul'))
def filterTensor():
ts = None
with tf.compat.v1.name_scope("while"):
ts = tf.multiply(1, 2)
ts1 = None
with tf.compat.v1.name_scope("start/while"):
ts1 = tf.multiply(3, 4)
ts2 = tf.multiply(5, 6)
print(ts.op.name)
print(ts1.op.name)
print(ts2.op.name)
f = tf.contrib.graph_editor.filter_ts_from_regex(
[ts.op, ts1.op, ts2.op],
'^(?!while)*(conv2d|Conv|MatMul|Mul)'
# '(/Mul)'
)
with tf.compat.v1.Session() as sess:
o = sess.run(f)
print(o)
def testGCS():
project = "linen-mapper-187215"
bucket_name = "carusytes_bucket"
prefix = "wcc_infer/vol_0"
gcs_client = gcs.Client(project)
print("client created")
bucket = gcs_client.get_bucket(bucket_name)
print("bucket initialized")
blobs = bucket.list_blobs(prefix=prefix)
print("blobs fetched")
for i, b in enumerate(blobs):
if i >= 5:
break
print(b.id[b.id.find('/')+1:b.id.rfind('/')])
def delayed_write_talst(i, talst):
sep = ' | '
with open(input_file2.TASKLIST_FILE, 'rb+') as f:
# fcntl.flock(f, fcntl.LOCK_EX)
t = talst[i]
idx = t['idx']
f.seek(idx)
ln = f.readline()
idx = idx + ln.find(sep)+len(sep)
print("readline: {}, idx:{}".format(ln,idx))
f.seek(idx)
f.write('O')
f.flush()
# fcntl.flock(f, fcntl.LOCK_UN)
def print_talst_element(i, talst):
with open(input_file2.TASKLIST_FILE, 'rb+') as f:
t = talst[i]
idx = idx = t['idx']
f.seek(idx)
ln = f.readline()
print("readline: {}, idx:{}".format(ln,idx))
def testTasklist():
project = "linen-mapper-187215"
talst = input_file2._get_infer_tasklist(
'gs://carusytes_bucket/wcc_infer', project)
print(talst)
# print('#talst: {}'.format(len(talst)))
# for i in range(50):
# print_talst_element(i, talst)
# test efficient status update
# for i in range(50):
# delayed_write_talst(i, talst)
# print("job done")
# r = Parallel(n_jobs=8)(delayed(delayed_write_talst)(i, talst) for i in range(50))
# if len(r) == 50:
# print("job done")
# testGatherND()
# testGetFileName()
# print(__file__)
# f = __file__
# print(f[f.rindex('/')+1:f.rindex('.py')])
# testTensorShape()
# testTasklist()
# filterTensor()
|
the-stack_106_25095 | # Copyright 2018 NTRLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
import hashlib
import base64
import cbor
from dgt_sdk.processor.handler import TransactionHandler
from dgt_sdk.processor.exceptions import InvalidTransaction
from dgt_sdk.processor.exceptions import InternalError
from bgt_common.protobuf.smart_bgt_token_pb2 import BgtTokenInfo
from dgt_signing.secp256k1 import Secp256k1PrivateKey, Secp256k1PublicKey, Secp256k1Context
from dgt_signing import CryptoFactory,create_context
LOGGER = logging.getLogger(__name__)
VALID_VERBS = 'set', 'inc', 'dec','trans'
MIN_VALUE = 0
MAX_VALUE = 4294967295
MAX_NAME_LENGTH = 20
FAMILY_NAME = 'bgt'
BGT_ADDRESS_PREFIX = hashlib.sha512(FAMILY_NAME.encode('utf-8')).hexdigest()[0:6]
def make_bgt_address(name):
return BGT_ADDRESS_PREFIX + hashlib.sha512(name.encode('utf-8')).hexdigest()[-64:]
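# Illustrative note: addresses are 70 hex characters long -- the 6-character family
# prefix derived from 'bgt' followed by the last 64 characters of sha512(name) --
# which follows the Sawtooth-style global state addressing scheme.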
class BgtTransactionHandler(TransactionHandler):
def __init__(self):
self._context = create_context('secp256k1')
LOGGER.debug('_do_set: context')
self._private_key = Secp256k1PrivateKey.new_random()
LOGGER.debug('_do_set: context private_key=%s',self._private_key.as_hex())
self._public_key = self._context.get_public_key(self._private_key)
crypto_factory = CryptoFactory(self._context)
self._signer = crypto_factory.new_signer(self._private_key)
#self._signer = CryptoFactory(self._context).new_signer(self.private_key)
LOGGER.debug('_do_set: public_key=%s ',self._public_key.as_hex())
LOGGER.info('BgtTransactionHandler init DONE')
@property
def family_name(self):
return FAMILY_NAME
@property
def family_versions(self):
return ['1.0']
@property
def namespaces(self):
return [BGT_ADDRESS_PREFIX]
def apply(self, transaction, context):
LOGGER.debug('apply:....\n')
verb, name, value, to = _unpack_transaction(transaction)
LOGGER.debug('apply:verb=%s name=%s value=%s to=%s',verb, name, value, to)
state = _get_state_data(name,to, context)
updated_state = self._do_bgt(verb, name, value, to, state)
_set_state_data( updated_state, context)
def _do_bgt(self,verb, name, value, to, state):
verbs = {
'set': self._do_set,
'inc': self._do_inc,
'dec': self._do_dec,
'trans': self._do_trans,
}
LOGGER.debug('_do_bgt request....')
try:
return verbs[verb](name, value,to, state)
except KeyError:
# This would be a programming error.
raise InternalError('Unhandled verb: {}'.format(verb))
def _do_set(self,name, value, to, state):
msg = 'Setting "{n}" to {v}'.format(n=name, v=value)
LOGGER.debug(msg)
if name in state:
raise InvalidTransaction('Verb is "set", but already exists: Name: {n}, Value {v}'.format(n=name,v=state[name]))
updated = {k: v for k, v in state.items()}
#owner_key = self._context.sign('BGT_token'.encode(),self._private_key)
token = BgtTokenInfo(group_code = 'BGT_token',
owner_key = self._signer.sign('BGT_token'.encode()), #owner_key,
sign = self._public_key.as_hex(),
decimals = int(value)
)
updated[name] = token.SerializeToString()
LOGGER.debug('_do_set updated=%s',updated)
return updated
def _do_inc(self,name, value, to, state):
msg = 'Incrementing "{n}" by {v}'.format(n=name, v=value)
LOGGER.debug(msg)
if name not in state:
raise InvalidTransaction(
'Verb is "inc" but name "{}" not in state'.format(name))
curr = state[name]
token = BgtTokenInfo()
token.ParseFromString(curr)
LOGGER.debug('_do_inc token[%s]=%s',token.group_code,value) # token.decimals
incd = token.decimals + value
if incd > MAX_VALUE:
raise InvalidTransaction(
'Verb is "inc", but result would be greater than {}'.format(MAX_VALUE))
updated = {k: v for k, v in state.items()}
token.decimals = incd
updated[name] = token.SerializeToString()
return updated
def _do_dec(self,name, value, to, state):
msg = 'Decrementing "{n}" by {v}'.format(n=name, v=value)
LOGGER.debug(msg)
if name not in state:
raise InvalidTransaction(
'Verb is "dec" but name "{}" not in state'.format(name))
curr = state[name]
token = BgtTokenInfo()
token.ParseFromString(curr)
        LOGGER.debug('_do_dec token[%s]=%s by %s', token.group_code, token.decimals, value)
decd = token.decimals - value
if decd < MIN_VALUE:
raise InvalidTransaction(
'Verb is "dec", but result would be less than {}'.format(
MIN_VALUE))
updated = {k: v for k, v in state.items()}
token.decimals = decd
updated[name] = token.SerializeToString()
return updated
def _do_trans(self,vfrom, value, vto, state):
msg = 'transfer "{n}"->"{t}" by {v}'.format(n=vfrom,t=vto, v=value)
LOGGER.debug(msg)
if vfrom not in state or vto not in state:
raise InvalidTransaction(
                'Verb is "trans" but wallet "{}" or wallet "{}" not in state'.format(vfrom,vto))
curr = state[vfrom]
token = BgtTokenInfo()
token.ParseFromString(curr)
to = state[vto]
token1 = BgtTokenInfo()
token1.ParseFromString(to)
        LOGGER.debug('_do_trans token[%s]=%s', token.group_code, value)
decd = token.decimals - value
if decd < MIN_VALUE:
raise InvalidTransaction('Verb is "trans", but result would be less than {}'.format(MIN_VALUE))
incd = token1.decimals + value
if incd > MAX_VALUE:
raise InvalidTransaction('Verb is "inc", but result would be greater than {}'.format(MAX_VALUE))
updated = {k: v for k, v in state.items()}
token.decimals = decd
updated[vfrom] = token.SerializeToString()
token1.decimals = incd
updated[vto] = token1.SerializeToString()
return updated
def _unpack_transaction(transaction):
verb, name, value, to = _decode_transaction(transaction)
_validate_verb(verb)
_validate_name(name)
_validate_value(value)
if to is not None:
_validate_name(to)
return verb, name, value, to
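# Illustrative payload accepted by _decode_transaction (wallet names are hypothetical):
#   cbor.dumps({'Verb': 'trans', 'Name': 'wallet1', 'Value': 5, 'To': 'wallet2'})
# 'To' is only required for the 'trans' verb.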
def _decode_transaction(transaction):
try:
content = cbor.loads(transaction.payload)
except:
raise InvalidTransaction('Invalid payload serialization')
LOGGER.debug('_decode_transaction content=%s',content)
try:
verb = content['Verb']
    except KeyError:
raise InvalidTransaction('Verb is required')
try:
name = content['Name']
    except KeyError:
raise InvalidTransaction('Name is required')
try:
value = content['Value']
    except KeyError:
raise InvalidTransaction('Value is required')
LOGGER.debug('_decode_transaction verb=%s',verb)
if verb == 'trans' :
if 'To' not in content :
raise InvalidTransaction('To is required')
to = content['To']
else:
to = None
return verb, name, value, to
def _validate_verb(verb):
if verb not in VALID_VERBS:
raise InvalidTransaction('Verb must be "set","trans", "inc", or "dec"')
def _validate_name(name):
if not isinstance(name, str) or len(name) > MAX_NAME_LENGTH:
raise InvalidTransaction('Name must be a string of no more than {} characters'.format(MAX_NAME_LENGTH))
def _validate_value(value):
if not isinstance(value, int) or value < 0 or value > MAX_VALUE:
raise InvalidTransaction(
'Value must be an integer '
'no less than {i} and no greater than {a}'.format(
i=MIN_VALUE,
a=MAX_VALUE))
def _get_state_data(name,to, context):
states = [make_bgt_address(name)]
if to is not None:
states.append(make_bgt_address(to))
state_entries = context.get_state(states)
try:
states = {}
for entry in state_entries:
state = cbor.loads(entry.data)
LOGGER.debug('_get_state_data state=(%s)', state)
for key, val in state.items():
LOGGER.debug('_get_state_data add=%s', key)
states[key] = val
return states
except IndexError:
return {}
except:
LOGGER.debug('_get_state_data: Failed to load state data')
raise InvalidTransaction('Failed to load state data')
def _set_state_data( state, context):
new_states = {}
for key,val in state.items():
LOGGER.debug('_set_state_data [%s]=%s', key, val)
address = make_bgt_address(key)
encoded = cbor.dumps({key: val})
new_states[address] = encoded
addresses = context.set_state(new_states)
if not addresses:
raise InternalError('State error')
|
the-stack_106_25098 | #!/usr/bin/env python2.7
import boto3
import rx
from poll import Poll
from calculate import Sum
from update import Update
from delete import Delete
import logging
logging.getLogger(
'botocore.vendored.requests.packages.urllib3.connectionpool'
).setLevel(logging.CRITICAL)
logging.getLogger('boto3.resources.action').setLevel(logging.CRITICAL)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
session = boto3.session.Session(region_name='ap-northeast-1')
sqs = session.resource('sqs')
sqs_client = sqs.meta.client
db = session.resource('dynamodb').meta.client
def block(r):
pass
def handler(event, context):
logger.info("Start!")
poll = Poll()
cal = Sum()
update = Update()
delete = Delete()
table = event['table']
queue_url = event['queueUrl']
message_count = event['messageCount']
def on_error(e):
raise e
def on_poll_completed():
logger.info("Receive API count: {}".format(poll.fetch_count))
logger.info("Fetched messages: {}".format(poll.message_count))
update_and_delete()
def update_and_delete_one(key):
updated_message_ids = update.execute(db, table, cal.stats[key])
return delete.execute(sqs_client, queue_url, updated_message_ids)
def update_and_delete():
delete_results = []
async_one = rx.Observable.to_async(update_and_delete_one)
for key in cal.stats:
delete_results.append(async_one(key))
rx.Observable.merge(delete_results).to_blocking().for_each(block)
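    # Poll -> aggregate -> (on completion) update DynamoDB and delete the processed
    # SQS messages; the merge/to_blocking call above waits for every per-key batch to
    # finish before the handler reports its API call counts.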
on_next_message = cal.add
messages = poll.messages(sqs, queue_url, message_count).to_blocking()
messages_observer = rx.Observer(on_next_message,
on_error,
on_poll_completed)
messages.subscribe(messages_observer)
messages.for_each(block)
logger.info("Update API count: {}".format(update.update_count))
logger.info("Delete API count: {}".format(delete.delete_count))
logger.info("Delete Message count: {}".format(delete.message_count))
|
the-stack_106_25099 | import logging
import wrapt
import ddtrace
log = logging.getLogger(__name__)
# To set attributes on wrapt proxy objects use this prefix:
# http://wrapt.readthedocs.io/en/latest/wrappers.html
_DD_PIN_NAME = '_datadog_pin'
_DD_PIN_PROXY_NAME = '_self_' + _DD_PIN_NAME
class Pin(object):
"""Pin (a.k.a Patch INfo) is a small class which is used to
set tracing metadata on a particular traced connection.
This is useful if you wanted to, say, trace two different
database clusters.
>>> conn = sqlite.connect("/tmp/user.db")
>>> # Override a pin for a specific connection
>>> pin = Pin.override(conn, service="user-db")
>>> conn = sqlite.connect("/tmp/image.db")
"""
__slots__ = ['app', 'app_type', 'tags', 'tracer', '_target', '_config', '_initialized']
def __init__(self, service, app=None, app_type=None, tags=None, tracer=None, _config=None):
tracer = tracer or ddtrace.tracer
self.app = app
self.app_type = app_type
self.tags = tags
self.tracer = tracer
self._target = None
# keep the configuration attribute internal because the
# public API to access it is not the Pin class
self._config = _config or {}
# [Backward compatibility]: service argument updates the `Pin` config
self._config['service_name'] = service
self._initialized = True
@property
def service(self):
"""Backward compatibility: accessing to `pin.service` returns the underlying
configuration value.
"""
return self._config['service_name']
def __setattr__(self, name, value):
        if getattr(self, '_initialized', False) and name != '_target':
raise AttributeError("can't mutate a pin, use override() or clone() instead")
super(Pin, self).__setattr__(name, value)
def __repr__(self):
return "Pin(service=%s, app=%s, app_type=%s, tags=%s, tracer=%s)" % (
self.service, self.app, self.app_type, self.tags, self.tracer)
@staticmethod
def _find(*objs):
"""
Return the first :class:`ddtrace.pin.Pin` found on any of the provided objects or `None` if none were found
>>> pin = Pin._find(wrapper, instance, conn, app)
:param *objs: The objects to search for a :class:`ddtrace.pin.Pin` on
:type objs: List of objects
:rtype: :class:`ddtrace.pin.Pin`, None
:returns: The first found :class:`ddtrace.pin.Pin` or `None` is none was found
"""
for obj in objs:
pin = Pin.get_from(obj)
if pin:
return pin
return None
@staticmethod
def get_from(obj):
"""Return the pin associated with the given object. If a pin is attached to
`obj` but the instance is not the owner of the pin, a new pin is cloned and
attached. This ensures that a pin inherited from a class is a copy for the new
instance, avoiding that a specific instance overrides other pins values.
>>> pin = Pin.get_from(conn)
:param obj: The object to look for a :class:`ddtrace.pin.Pin` on
:type obj: object
:rtype: :class:`ddtrace.pin.Pin`, None
:returns: :class:`ddtrace.pin.Pin` associated with the object, or None if none was found
"""
if hasattr(obj, '__getddpin__'):
return obj.__getddpin__()
pin_name = _DD_PIN_PROXY_NAME if isinstance(obj, wrapt.ObjectProxy) else _DD_PIN_NAME
pin = getattr(obj, pin_name, None)
# detect if the PIN has been inherited from a class
if pin is not None and pin._target != id(obj):
pin = pin.clone()
pin.onto(obj)
return pin
@classmethod
def override(cls, obj, service=None, app=None, app_type=None, tags=None, tracer=None):
"""Override an object with the given attributes.
That's the recommended way to customize an already instrumented client, without
losing existing attributes.
>>> conn = sqlite.connect("/tmp/user.db")
>>> # Override a pin for a specific connection
>>> Pin.override(conn, service="user-db")
"""
if not obj:
return
pin = cls.get_from(obj)
if not pin:
pin = Pin(service)
pin.clone(
service=service,
app=app,
app_type=app_type,
tags=tags,
tracer=tracer,
).onto(obj)
def enabled(self):
"""Return true if this pin's tracer is enabled. """
return bool(self.tracer) and self.tracer.enabled
def onto(self, obj, send=True):
"""Patch this pin onto the given object. If send is true, it will also
queue the metadata to be sent to the server.
"""
# pinning will also queue the metadata for service submission. this
# feels a bit side-effecty, but bc it's async and pretty clearly
# communicates what we want, i think it makes sense.
if send:
try:
self._send()
except Exception:
log.debug("can't send pin info", exc_info=True)
# Actually patch it on the object.
try:
if hasattr(obj, '__setddpin__'):
return obj.__setddpin__(self)
pin_name = _DD_PIN_PROXY_NAME if isinstance(obj, wrapt.ObjectProxy) else _DD_PIN_NAME
# set the target reference; any get_from, clones and retarget the new PIN
self._target = id(obj)
return setattr(obj, pin_name, self)
except AttributeError:
log.debug("can't pin onto object. skipping", exc_info=True)
def remove_from(self, obj):
# Remove pin from the object.
try:
pin_name = _DD_PIN_PROXY_NAME if isinstance(obj, wrapt.ObjectProxy) else _DD_PIN_NAME
pin = Pin.get_from(obj)
if pin is not None:
delattr(obj, pin_name)
except AttributeError:
log.debug('can\'t remove pin from object. skipping', exc_info=True)
def clone(self, service=None, app=None, app_type=None, tags=None, tracer=None):
"""Return a clone of the pin with the given attributes replaced."""
# do a shallow copy of Pin dicts
if not tags and self.tags:
tags = self.tags.copy()
# we use a copy instead of a deepcopy because we expect configurations
# to have only a root level dictionary without nested objects. Using
# deepcopy introduces a big overhead:
#
# copy: 0.00654911994934082
# deepcopy: 0.2787208557128906
config = self._config.copy()
return Pin(
service=service or self.service,
app=app or self.app,
app_type=app_type or self.app_type,
tags=tags,
tracer=tracer or self.tracer, # do not clone the Tracer
_config=config,
)
def _send(self):
self.tracer.set_service_info(
service=self.service,
app=self.app,
app_type=self.app_type,
)
|
the-stack_106_25100 | """
A module for constructing Hamiltonians for marginal reconstruction and variational 2-RDM theory.
Provides functionality to transform molecular integrals into the appropriate Tensor objects.
"""
from itertools import product
import numpy as np
from scipy.linalg import block_diag
from representability.fermions.basis_utils import geminal_spin_basis
from representability.tensor import Tensor
def spin_orbital_interaction_tensor(two_body_int, one_body_int):
"""
Construct the cost operator
:param two_body_int: two-body integrals in spin-orbital basis
:param one_body_int: one-body integral in spin-orbital basis
"""
opdm_interaction_tensor = Tensor(one_body_int, name='ck')
tpdm_interaction_tensor = Tensor(two_body_int, name='cckk')
return opdm_interaction_tensor, tpdm_interaction_tensor
def spin_adapted_interaction_tensor(two_body_int, one_body_int):
"""
Construct the cost operator in symmetric and antisymmetric basis
The spin-orbital integrals are in the spin-less fermion basis.
Spin-full fermions are index by even/odd
:param two_body_int:
:param one_body_int:
:return:
"""
sp_dim = int(one_body_int.shape[0] / 2)
one_body_spatial_int = np.zeros((sp_dim, sp_dim), dtype=float)
even_set = one_body_int[::2, ::2].copy()
for p, q in product(range(sp_dim), repeat=2):
one_body_spatial_int[p, q] = one_body_int[2 * p, 2 * q]
assert np.allclose(even_set, one_body_spatial_int)
opdm_a_interaction = Tensor(one_body_spatial_int, name='ck_a')
opdm_b_interaction = Tensor(one_body_spatial_int, name='ck_b')
aa_dim = int(sp_dim * (sp_dim - 1) / 2)
ab_dim = int(sp_dim**2)
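    # Geminal basis sizes: the same-spin (aa/bb) blocks use antisymmetric pairs p < q,
    # hence N(N-1)/2 rows, while the opposite-spin (ab) block keeps all N**2 ordered pairs.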
v2aa = np.zeros((aa_dim, aa_dim))
v2bb = np.zeros_like(v2aa)
v2ab = np.zeros((ab_dim, ab_dim))
b_aa_dict = {}
b_ab_dict = {}
cnt, cnt2 = 0, 0
for i, j in product(range(sp_dim), repeat=2):
if i < j:
b_aa_dict[(i, j)] = cnt
cnt += 1
b_ab_dict[(i, j)] = cnt2
cnt2 += 1
for p, q, r, s in product(range(sp_dim), repeat=4):
if p < q and r < s:
# 0.5 still there because antisymmetric basis becomes <ij|kl> -
# <ij|lk>. The 0.5 for coulomb interaction counting is still
# needed to avoid double counting
v2aa[b_aa_dict[(p, q)], b_aa_dict[(r, s)]] = 0.5 * (two_body_int[2 * p, 2 * q, 2 * r, 2 * s] - two_body_int[2 * p, 2 * q, 2 * s, 2 * r])
v2bb[b_aa_dict[(p, q)], b_aa_dict[(r, s)]] = 0.5 * (two_body_int[2 * p + 1, 2 * q + 1, 2 * r + 1, 2 * s + 1] - two_body_int[2 * p + 1, 2 * q + 1, 2 * s + 1, 2 * r + 1])
v2ab[b_ab_dict[(p, q)], b_ab_dict[(r, s)]] = two_body_int[2 * p, 2 * q + 1, 2 * r, 2 * s + 1]
bas_aa, bas_ab = geminal_spin_basis(sp_dim)
v2ab = Tensor(v2ab, basis=bas_ab, name='cckk_ab')
v2bb = Tensor(v2bb, basis=bas_aa, name='cckk_bb')
v2aa = Tensor(v2aa, basis=bas_aa, name='cckk_aa')
return opdm_a_interaction, opdm_b_interaction, v2aa, v2bb, v2ab
def spin_adapted_interaction_tensor_rdm_consistent(two_body_int, one_body_int):
"""
Construct the cost operator in symmetric and antisymmetric basis
The spin-orbital integrals are in the spin-less fermion basis.
Spin-full fermions are index by even/odd
:param two_body_int:
:param one_body_int:
:return:
"""
sp_dim = int(one_body_int.shape[0] / 2)
one_body_spatial_int = np.zeros((sp_dim, sp_dim), dtype=float)
even_set = one_body_int[::2, ::2].copy()
for p, q in product(range(sp_dim), repeat=2):
one_body_spatial_int[p, q] = one_body_int[2 * p, 2 * q]
assert np.allclose(even_set, one_body_spatial_int)
opdm_a_interaction = Tensor(one_body_spatial_int, name='ck_a')
opdm_b_interaction = Tensor(one_body_spatial_int, name='ck_b')
aa_dim = int(sp_dim * (sp_dim - 1) / 2)
ab_dim = int(sp_dim**2)
v2aa = np.zeros((aa_dim, aa_dim))
v2bb = np.zeros_like(v2aa)
v2ab = np.zeros((ab_dim, ab_dim))
b_aa_dict = {}
b_ab_dict = {}
cnt, cnt2 = 0, 0
for i, j in product(range(sp_dim), repeat=2):
if i < j:
b_aa_dict[(i, j)] = cnt
cnt += 1
b_ab_dict[(i, j)] = cnt2
cnt2 += 1
for p, q, r, s in product(range(sp_dim), repeat=4):
if p < q and r < s:
# 0.5 still there because antisymmetric basis becomes <ij|kl> -
# <ij|lk>. The 0.5 for coulomb interaction counting is still
# needed to avoid double counting
v2aa[b_aa_dict[(p, q)], b_aa_dict[(r, s)]] = 0.5 * (two_body_int[2 * p, 2 * q, 2 * r, 2 * s] -
two_body_int[2 * p, 2 * q, 2 * s, 2 * r] -
two_body_int[2 * q, 2 * p, 2 * r, 2 * s] +
two_body_int[2 * q, 2 * p, 2 * s, 2 * r])
v2bb[b_aa_dict[(p, q)], b_aa_dict[(r, s)]] = 0.5 * (two_body_int[2 * p + 1, 2 * q + 1, 2 * r + 1, 2 * s + 1] -
two_body_int[2 * p + 1, 2 * q + 1, 2 * s + 1, 2 * r + 1] -
two_body_int[2 * q + 1, 2 * p + 1, 2 * r + 1, 2 * s + 1] +
two_body_int[2 * q + 1, 2 * p + 1, 2 * s + 1, 2 * r + 1])
v2ab[b_ab_dict[(p, q)], b_ab_dict[(r, s)]] = two_body_int[2 * p, 2 * q + 1, 2 * r, 2 * s + 1] + \
two_body_int[2 * p + 1, 2 * q, 2 * r + 1, 2 * s]
bas_aa, bas_ab = geminal_spin_basis(sp_dim)
v2ab = Tensor(v2ab, basis=bas_ab, name='cckk_ab')
v2bb = Tensor(v2bb, basis=bas_aa, name='cckk_bb')
v2aa = Tensor(v2aa, basis=bas_aa, name='cckk_aa')
return opdm_a_interaction, opdm_b_interaction, v2aa, v2bb, v2ab
def make_sz_spin_adapted_hamiltonian(oei, tei):
"""
Make the sz spin-adapted Hamiltonian tensors
tei = <i, j, k, l> corresponds to i(1)* j(2)* k(2) l(1)
To derive
x, y are spin-variable index
0.5 * sum{pqrs}V_{pqrs} sum_{x, y} (px)^ (qy)^ (ry) (sx)
:param oei: spatial one-electron integrals
:param tei: spatial two-electron integrals
"""
sdim = oei.shape[0]
bas_aa = {}
bas_ab = {}
cnt_aa = 0
cnt_ab = 0
for p, q in product(range(sdim), repeat=2):
if p < q:
bas_aa[(p, q)] = cnt_aa
cnt_aa += 1
bas_ab[(p, q)] = cnt_ab
cnt_ab += 1
v2aa = np.zeros((sdim * (sdim - 1) // 2, sdim * (sdim - 1) // 2))
v2ab = np.zeros((sdim * sdim , sdim * sdim))
rev_bas_aa = dict(zip(bas_aa.values(), bas_aa.keys()))
rev_bas_ab = dict(zip(bas_ab.values(), bas_ab.keys()))
for r, s in product(range(len(bas_aa)), repeat=2):
i, j = rev_bas_aa[r]
k, l = rev_bas_aa[s]
v2aa[r, s] = 0.5 * (tei[i, j, l, k] - tei[j, i, l, k] -
tei[i, j, k, l] + tei[j, i, k, l])
for r, s in product(range(len(bas_ab)), repeat=2):
i, j = rev_bas_ab[r]
k, l = rev_bas_ab[s]
# we don't multiply by 0.5 because we count alpha-beta and beta-alpha
v2ab[r, s] = tei[i, j, l, k]
opdm_a = Tensor(oei, name='ck_a')
opdm_b = Tensor(oei, name='ck_b')
bas_aa, bas_ab = geminal_spin_basis(sdim)
v2ab = Tensor(v2ab, basis=bas_ab, name='cckk_ab')
v2bb = Tensor(v2aa, basis=bas_aa, name='cckk_bb')
v2aa = Tensor(v2aa, basis=bas_aa, name='cckk_aa')
return opdm_a, opdm_b, v2aa, v2bb, v2ab
def spin_orbital_marginal_norm_min(dim, tensor_name='ME', basis=None):
"""
Construct the cost operator as the trace over free variables
quadrant indexing
[0, 0] | [0, 1]
---------------
[1, 0] | [1, 1]
I | E
-----
E | F
Example:
Mat =
[ 0, 1, 2, 3,| 4, 5, 6, 7]
[ 8, 9, 10, 11,| 12, 13, 14, 15]
[16, 17, 18, 19,| 20, 21, 22, 23]
[24, 25, 26, 27,| 28, 29, 30, 31]
---------------------------------
[32, 33, 34, 35,| 36, 37, 38, 39]
[40, 41, 42, 43,| 44, 45, 46, 47]
[48, 49, 50, 51,| 52, 53, 54, 55]
[56, 57, 58, 59,| 60, 61, 62, 63]
M = 2
for p, q in product(range(M), repeat=2):
Mat[p*M + q + 1 * M**2, p*M + q + 1 * M**2] = 1.0
:param Int dim: 2 * dim is the size of the super-block
:param String tensor_name: name to index the tensor by
:param Bijection basis: Default None. basis for off-diagonals of superblock
"""
zero_block = np.zeros((dim, dim))
eye_block = np.eye(dim)
cost_tensor = block_diag(zero_block, eye_block)
cost = Tensor(cost_tensor, basis=basis, name=tensor_name)
return cost
|
the-stack_106_25103 | import json
from summary.model import DockSummary
from summary.schema import DockSummarySchema
def load(dock_file: str) -> DockSummary:
try:
with open(dock_file) as json_file:
data = json.load(json_file)
return DockSummarySchema().load(data)
except FileNotFoundError:
return DockSummary()
def save(dock_file: str, summary: DockSummary):
with open(dock_file, "w") as json_file:
data = DockSummarySchema().dump(summary)
json.dump(obj=data, fp=json_file, indent=4)
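# Minimal usage sketch (file name is hypothetical):
#   summary = load("dock_summary.json")   # returns an empty DockSummary if the file is missing
#   ... mutate the DockSummary instance via whatever fields the model defines ...
#   save("dock_summary.json", summary)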
|
the-stack_106_25104 | #!/usr/bin/env python3
"""Script to check whether the installation is done correctly."""
# Copyright 2018 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import importlib
import shutil
import sys
from packaging.version import parse
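# Each entry below is (module_name, supported_versions_or_None, installer_script_or_None);
# when a module is missing and an installer is listed, an install hint is printed at the end.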
module_list = [
("torchaudio", None, None),
("torch_optimizer", None, None),
("warpctc_pytorch", None, "installers/install_warp-ctc.sh"),
("warprnnt_pytorch", None, "installers/install_warp-transducer.sh"),
("chainer_ctc", None, "installers/install_chainer_ctc.sh"),
("pyopenjtalk", None, "installers/install_pyopenjtalk.sh"),
("tdmelodic_pyopenjtalk", None, "installers/install_tdmelodic_pyopenjtalk.sh"),
("kenlm", None, "installers/install_kenlm.sh"),
("mmseg", None, "installers/install_py3mmseg.sh"),
("espnet", None, None),
("fairseq", None, "installers/install_fairseq.sh"),
("phonemizer", None, "installers/install_phonemizer.sh"),
("gtn", None, "installers/install_gtn.sh"),
("s3prl", None, "installers/install_s3prl.sh"),
("transformers", None, "installers/install_transformers.sh"),
("speechbrain", None, "installers/install_speechbrain.sh"),
("k2", None, "installers/install_k2.sh"),
("longformer", None, "installers/install_longformer.sh"),
("nlg-eval", None, "installers/install_longformer.sh"),
("datasets", None, "installers/install_longformer.sh"),
]
executable_list = [
("sclite", "installers/install_sctk.sh"),
("sph2pipe", "installers/install_sph2pipe.sh"),
("PESQ", "installers/install_pesq.sh"),
("BeamformIt", "installers/install_beamformit.sh"),
]
def main():
"""Check the installation."""
python_version = sys.version.replace("\n", " ")
print(f"[x] python={python_version}")
print()
print("Python modules:")
try:
import torch
print(f"[x] torch={torch.__version__}")
if torch.cuda.is_available():
print(f"[x] torch cuda={torch.version.cuda}")
else:
print("[ ] torch cuda")
if torch.backends.cudnn.is_available():
print(f"[x] torch cudnn={torch.backends.cudnn.version()}")
else:
print("[ ] torch cudnn")
if torch.distributed.is_nccl_available():
print("[x] torch nccl")
else:
print("[ ] torch nccl")
except ImportError:
print("[ ] torch")
try:
import chainer
print(f"[x] chainer={chainer.__version__}")
if parse(chainer.__version__) != parse("6.0.0"):
print(
f"Warning! chainer={chainer.__version__} is not supported. "
"Supported version is 6.0.0"
)
if chainer.backends.cuda.available:
print("[x] chainer cuda")
else:
print("[ ] chainer cuda")
if chainer.backends.cuda.cudnn_enabled:
print("[x] chainer cudnn")
else:
print("[ ] chainer cudnn")
except ImportError:
print("[ ] chainer")
try:
import cupy
print(f"[x] cupy={cupy.__version__}")
try:
from cupy.cuda import nccl # NOQA
print("[x] cupy nccl")
except ImportError:
print("[ ] cupy nccl")
except ImportError:
print("[ ] cupy")
to_install = []
for name, versions, installer in module_list:
try:
m = importlib.import_module(name)
if hasattr(m, "__version__"):
version = m.__version__
print(f"[x] {name}={version}")
if versions is not None and version not in versions:
                    print(
                        f"Warning! {name}={version} is not supported. "
                        f"Supported versions are {versions}"
                    )
else:
print(f"[x] {name}")
except ImportError:
print(f"[ ] {name}")
if installer is not None:
to_install.append(f"Use '{installer}' to install {name}")
print()
print("Executables:")
for name, installer in executable_list:
if shutil.which(name) is not None:
print(f"[x] {name}")
else:
print(f"[ ] {name}")
if installer is not None:
to_install.append(f"Use '{installer}' to install {name}")
print()
print("INFO:")
for m in to_install:
print(m)
if __name__ == "__main__":
main()
|
the-stack_106_25105 | # -*- coding: utf-8 -*-
#
# wotlkdoc_images documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 1 00:00:00 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from __future__ import unicode_literals
import os
from datetime import datetime
import wotlkdoc_images
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinxcontrib.jinja',
'sphinx_copybutton',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'wotlkdoc_images'
copyright = '%s, Sanhe Hu' % datetime.utcnow().year
author = 'Sanhe Hu'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = wotlkdoc_images.__version__
# The full version, including alpha/beta/rc tags.
release = wotlkdoc_images.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_logo = "./_static/wotlkdoc_images-logo.png"
html_favicon = "./_static/wotlkdoc_images-favicon.ico"
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'wotlkdoc_imagesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'wotlkdoc_images.tex', 'wotlkdoc_images Documentation',
u'Sanhe Hu', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'wotlkdoc_images', 'wotlkdoc_images Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'wotlkdoc_images', 'wotlkdoc_images Documentation',
author, 'wotlkdoc_images', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
autodoc_member_order = 'bysource'
# Enable custom css
try:
custom_style_file_path = os.path.join(os.path.dirname(__file__), "_static", ".custom-style.rst")
with open(custom_style_file_path, "rb") as f:
custom_style_file_content = f.read().decode("utf-8")
rst_prolog = "\n" + custom_style_file_content + "\n"
except:
pass
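# rst_prolog is prepended to every reST source file, so the roles and classes declared
# in .custom-style.rst become available throughout the documentation.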
# Add data for Jinja2
try:
from wotlkdoc_images.docs import doc_data
except:
doc_data = dict()
jinja_contexts = {
"doc_data": {
"doc_data": doc_data,
},
}
# Api Reference Doc
import docfly
package_name = wotlkdoc_images.__name__
docfly.ApiReferenceDoc(
conf_file=__file__,
package_name=package_name,
ignored_package=[
"%s.pkg" % package_name,
"%s.docs" % package_name,
"%s.tests" % package_name,
]
).fly()
def source_read_callback(app, docname, source):
"""
This function will be called every time after Sphinx read a rst file content.
"""
# Make sure we're outputting HTML
if app.builder.format != 'html':
return
src = source[0]
src = docfly.DocTree.fly(
conf_path=__file__, docname=docname, source=src,
maxdepth=1,
)
source[0] = src
def setup(app):
app.add_stylesheet('css/custom-style.css')
app.add_javascript('js/sorttable.js')
app.connect("source-read", source_read_callback) |
the-stack_106_25110 | #
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, [email protected].
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
import numpy as np
from scipy.sparse import identity, issparse
from amfe.linalg.tools import isboolean
from .constraint_formulation import ConstraintFormulationBase
class BooleanEliminationConstraintFormulation(ConstraintFormulationBase):
"""
Works only with holonomic scleronomic constraints that result in a constant Boolean B matrix
(Jacobian of the constraint function)
Attributes
----------
_L: csr_matrix
Matrix that is able to eliminate the constrained dofs by applying :math:`L^T A L` to a matrices A
_L_changed: bool
Internal flag that indicates if L must be updated when it is asked for the next time
Notes
-----
Currently there is no check if this formulation is allowed to use!
It may only be used for constraints defined by Bu = 0 with boolean matrix B
"""
def __init__(self, no_of_dofs_unconstrained, M_func, h_func, B_func, p_func=None,
jac_h_u=None, jac_h_du=None, jac_p_u=None, jac_p_du=None,
g_func=None, b_func=None, a_func=None):
super().__init__(no_of_dofs_unconstrained, M_func, h_func, B_func, p_func,
jac_h_u, jac_h_du, jac_p_u, jac_p_du,
g_func, b_func, a_func)
self._L = None
self._L_changed = True # Setting flag for lazy evaluation
@property
def dimension(self):
"""
Returns the dimension of the system after constraints have been applied
Returns
-------
dim: int
dimension of the system after constraints are applied
"""
return self.L.shape[1]
@property
def L(self):
"""
Returns the L matrix that is able to eliminate the constrained dofs by applying :math:`L^T A L` to a matrices A
Returns
-------
L: csr_matrix
The matrix L
"""
if self._L_changed:
self._compute_L()
self._L_changed = False
return self._L
def update(self):
"""
Function that is called by observers if state has changed
Returns
-------
None
"""
# This class assumes that the C matrix is constant and Boolean
self._L_changed = True
def _compute_L(self):
"""
Internal function that computes the matrix L
The function is called when L must be updated
L is the nullspace of B
Returns
-------
None
"""
# Boolean elimination assumes that C is constant (scleronomic) and independent on q!
# Thus, C is called by just calling for any arbitrary values, q and t
q = np.zeros(self._no_of_dofs_unconstrained, dtype=float)
t = 0.0
B = self._B_func(q, t)
constrained_dofs = self._get_constrained_dofs_by_B(B)
if issparse(B):
self._L = self._get_L_by_constrained_dofs(constrained_dofs, B.shape[1], format='csr')
else:
self._L = self._get_L_by_constrained_dofs(constrained_dofs, B.shape[1], format='dense')
@staticmethod
def _get_constrained_dofs_by_B(B):
"""
Static method that computes the indices of those dofs that are constrained when a matrix B is given that
is boolean
Parameters
----------
B: csr_matrix
B is a matrix coming from the constraint definitions: B q + b = 0
Returns
        -------
        constrained_dofs: list
            indices of the dofs that are constrained (one entry per row of B)
"""
# Check if B is boolean
# later also for substructuring coupling: if np.array_equal(np.abs(B_boolean), np.abs(B_boolean).astype(bool)):
if isboolean(B):
# check if only one 1 is in each row:
if issparse(B):
Bcsr = B.tocsr()
if np.array_equal(Bcsr.indptr, np.arange(len(Bcsr.indices)+1)):
constrained_dofs = B.tocsr().indices.tolist()
else:
raise ValueError('B_boolean must have exactly one 1-entry per row')
else:
counts = np.count_nonzero(B, axis=1)
if np.all(counts == 1):
constrained_dofs = list()
for row in B:
index = np.where(row == 1)[0][0]
constrained_dofs.append(index)
else:
raise ValueError('B_boolean must have exactly one 1-entry per row')
return constrained_dofs
else:
raise ValueError('B_boolean must be a Boolean array')
@staticmethod
def _get_L_by_constrained_dofs(constrained_dofs, ndof_unconstrained, format='csr'):
"""
Internal static function that computes L by given indices of constrained dofs
Parameters
----------
constrained_dofs: list or ndarray
list containing the indices of the constrained dofs
ndof_unconstrained: int
number of dofs of the unconstrained system
format: str
format = 'csr' or 'dense' describes the format of L
Returns
-------
L: csr_matrix
computed L matrix
"""
L = identity(ndof_unconstrained, format='csr')
col_idxs_not_to_remove = np.arange(0, ndof_unconstrained)
col_idxs_not_to_remove = np.delete(col_idxs_not_to_remove, constrained_dofs)
if format == 'csr':
return L[:, col_idxs_not_to_remove]
elif format == 'dense':
return L[:, col_idxs_not_to_remove].toarray()
else:
raise ValueError('Only csr or dense format allowed')
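    # Illustrative example: for 4 unconstrained dofs and constrained_dofs=[1], L is the
    # 4x4 identity with column 1 removed (shape 4x3), so L.T @ M @ L drops the row and
    # column of the constrained dof from any system matrix M.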
def u(self, x, t):
"""
Parameters
----------
x: numpy.array
Global state vector of the system
t: float
time
Returns
-------
u: numpy.array
recovered displacements of the unconstrained system
"""
return self.L.dot(x)
def du(self, x, dx, t):
"""
Parameters
----------
x: numpy.array
Global state vector of the system
dx: numpy.array
First time derivative of global state vector of the constrained system
t: float
time
Returns
-------
du: numpy.array
recovered velocities of the unconstrained system
"""
return self.L.dot(dx)
def ddu(self, x, dx, ddx, t):
"""
Parameters
----------
x: numpy.array
Global state vector of the system
dx: numpy.array
First time derivative of global state vector of the constrained system
ddx: numpy.array
Second time derivative of global state vector of the constrained system
t: float
time
Returns
-------
ddu: numpy.array
recovered accelerations of the unconstrained system
"""
return self.L.dot(ddx)
def lagrange_multiplier(self, x, t):
"""
Recovers the lagrange multipliers of the unconstrained system
Parameters
----------
x: numpy.array
Global state vector of the system
t: float
time
Returns
-------
lambda_: numpy.array
recovered displacements of the unconstrained system
"""
return np.array([], ndmin=1, dtype=float)
def M(self, x, dx, t):
r"""
Returns the constrained mass matrix
Parameters
----------
x: numpy.array
Global state vector of the system
dx: numpy.array
First time derivative of global state vector of the constrained system
t: float
time
Returns
-------
M: csr_matrix
Constrained mass matrix
Notes
-----
In this formulation this returns
.. math::
L^T M_{raw} L
"""
u = self.u(x, t)
du = self.du(x, dx, t)
return self.L.T.dot(self._M_func(u, du, t)).dot(self.L)
def f_int(self, x, dx, t):
r"""
Returns the constrained f_int vector
Parameters
----------
x: numpy.array
Global state vector of the system
dx: numpy.array
First time derivative of global state vector of the constrained system
t: float
time
Returns
-------
F: numpy.array
Constrained F vector
Notes
-----
In this formulation this returns
.. math::
L^T h(u, du, t)
"""
u = self.u(x, t)
du = self.du(x, dx, t)
return self.L.T.dot(self._h_func(u, du, t))
def f_ext(self, x, dx, t):
r"""
Returns the constrained f_ext vector
Parameters
----------
x: numpy.array
Global state vector of the system
dx: numpy.array
First time derivative of global state vector of the constrained system
t: float
time
Returns
-------
F: numpy.array
Constrained F vector
Notes
-----
In this formulation this returns
.. math::
L^T p(u, du, t)
"""
u = self.u(x, t)
du = self.du(x, dx, t)
return self.L.T.dot(self._p_func(u, du, t))
def K(self, x, dx, t):
r"""
Returns the constrained stiffness matrix
Parameters
----------
x: numpy.array
Global state vector of the system
dx: numpy.array
First time derivative of global state vector of the constrained system
t: float
time
Returns
-------
K: csr_matrix
Constrained mass matrix
Notes
-----
In this formulation this returns
.. math::
L^T \frac{\mathrm{d}(h-p)}{\mathrm{d} u} L
"""
u = self.u(x, t)
du = self.du(x, dx, t)
if self._jac_h_u is not None:
if self._jac_p_u is not None:
return self.L.T.dot(self._jac_h_u(u, du, t) - self._jac_p_u(u, du, t)).dot(self.L)
else:
return self.L.T.dot(self._jac_h_u(u, du, t)).dot(self.L)
else:
raise NotImplementedError('Numerical differentiation of h is not implemented yet')
def D(self, x, dx, t):
r"""
Returns the constrained damping matrix
Parameters
----------
x: numpy.array
Global state vector of the system
dx: numpy.array
First time derivative of global state vector of the constrained system
t: float
time
Returns
-------
D: csr_matrix
Constrained damping matrix
Notes
-----
In this formulation this returns
.. math::
L^T \frac{\mathrm{d}(h-p)}{\mathrm{d} \dot{u}} L
"""
u = self.u(x, t)
du = self.du(x, dx, t)
if self._jac_h_du is not None:
if self._jac_p_du is not None:
return self.L.T.dot(self._jac_h_du(u, du, t) - self._jac_p_du(u, du, t)).dot(self.L)
else:
return self.L.T.dot(self._jac_h_du(u, du, t)).dot(self.L)
else:
raise NotImplementedError('Numerical differentiation of h is not implemented yet')
|
the-stack_106_25112 | # Copyright (c) 2020 Aldo Hoeben / fieldOfView
# The ArcWelderPlugin for Cura is released under the terms of the AGPLv3 or higher.
from collections import OrderedDict
import json
import tempfile
import os
import stat
import subprocess
import locale
import re
from UM.Extension import Extension
from UM.Application import Application
from UM.Settings.SettingDefinition import SettingDefinition
from UM.Settings.DefinitionContainer import DefinitionContainer
from UM.Settings.ContainerRegistry import ContainerRegistry
from UM.Logger import Logger
from UM.Platform import Platform
from typing import Dict, List, Any
class ArcWelderPlugin(Extension):
def __init__(self):
super().__init__()
self._application = Application.getInstance()
self._i18n_catalog = None
settings_definition_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "arcwelder_settings.def.json"
)
try:
with open(settings_definition_path, "r", encoding="utf-8") as f:
self._settings_dict = json.load(f, object_pairs_hook=OrderedDict)
except:
Logger.logException("e", "Could not load arc welder settings definition")
return
if Platform.isWindows():
arcwelder_executable = "bin/win64/ArcWelder.exe"
elif Platform.isLinux():
arcwelder_executable = "bin/linux/ArcWelder"
        elif Platform.isOSX():
            arcwelder_executable = "bin/osx/ArcWelder"
        else:
            Logger.log("w", "Unsupported platform; ArcWelder will not be enabled")
            return
self._arcwelder_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), arcwelder_executable
)
if not Platform.isWindows():
try:
os.chmod(
self._arcwelder_path,
stat.S_IXUSR
| stat.S_IRUSR
| stat.S_IRGRP
| stat.S_IROTH
| stat.S_IWUSR,
) # Make sure we have the rights to run this.
except:
                Logger.logException("e", "Could not modify rights of ArcWelder executable")
return
if Platform.isWindows():
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
startupinfo = None
version_output = subprocess.check_output(
[self._arcwelder_path, "--version"], startupinfo=startupinfo
).decode(locale.getpreferredencoding())
match = re.search("version: (.*)", version_output)
if match:
Logger.log("d", "Using ArcWelder %s" % match.group(1))
else:
Logger.log("w", "Could not determine ArcWelder version")
self._application.getPreferences().addPreference(
"arcwelderplugin/settings_made_visible", False
)
ContainerRegistry.getInstance().containerLoadComplete.connect(
self._onContainerLoadComplete
)
self._application.getOutputDeviceManager().writeStarted.connect(
self._filterGcode
)
def _onContainerLoadComplete(self, container_id: str) -> None:
if not ContainerRegistry.getInstance().isLoaded(container_id):
# skip containers that could not be loaded, or subsequent findContainers() will cause an infinite loop
return
try:
container = ContainerRegistry.getInstance().findContainers(id=container_id)[
0
]
except IndexError:
# the container no longer exists
return
if not isinstance(container, DefinitionContainer):
# skip containers that are not definitions
return
if container.getMetaDataEntry("type") == "extruder":
# skip extruder definitions
return
try:
category = container.findDefinitions(key="blackmagic")[0]
except IndexError:
Logger.log("e", "Could not find parent category setting to add settings to")
return
for setting_key in self._settings_dict.keys():
setting_definition = SettingDefinition(
setting_key, container, category, self._i18n_catalog
)
setting_definition.deserialize(self._settings_dict[setting_key])
# add the setting to the already existing blackmagic settingdefinition
# private member access is naughty, but the alternative is to serialise, nix and deserialise the whole thing,
# which breaks stuff
category._children.append(setting_definition)
container._definition_cache[setting_key] = setting_definition
self._expanded_categories = self._application.expandedCategories.copy()
self._updateAddedChildren(container, setting_definition)
self._application.setExpandedCategories(self._expanded_categories)
self._expanded_categories.clear()
container._updateRelations(setting_definition)
preferences = self._application.getPreferences()
if not preferences.getValue("arcwelderplugin/settings_made_visible"):
setting_keys = self._getAllSettingKeys(self._settings_dict)
visible_settings = preferences.getValue("general/visible_settings")
visible_settings_changed = False
for key in setting_keys:
if key not in visible_settings:
visible_settings += ";%s" % key
visible_settings_changed = True
if visible_settings_changed:
preferences.setValue("general/visible_settings", visible_settings)
preferences.setValue("arcwelderplugin/settings_made_visible", True)
def _updateAddedChildren(
self, container: DefinitionContainer, setting_definition: SettingDefinition
) -> None:
children = setting_definition.children
if not children or not setting_definition.parent:
return
# make sure this setting is expanded so its children show up in setting views
if setting_definition.parent.key in self._expanded_categories:
self._expanded_categories.append(setting_definition.key)
for child in children:
container._definition_cache[child.key] = child
self._updateAddedChildren(container, child)
def _getAllSettingKeys(self, definition: Dict[str, Any]) -> List[str]:
children = []
for key in definition:
children.append(key)
if "children" in definition[key]:
children.extend(self._getAllSettingKeys(definition[key]["children"]))
return children
def _filterGcode(self, output_device):
scene = self._application.getController().getScene()
global_container_stack = self._application.getGlobalContainerStack()
if not global_container_stack:
return
arcwelder_enable = global_container_stack.getProperty(
"arcwelder_enable", "value"
)
if not arcwelder_enable:
Logger.log("d", "ArcWelder is not enabled")
return
maximum_radius = global_container_stack.getProperty(
"arcwelder_maximum_radius", "value"
)
path_tolerance = (
global_container_stack.getProperty("arcwelder_tolerance", "value") / 100
)
resolution = global_container_stack.getProperty("arcwelder_resolution", "value")
firmware_compensation = global_container_stack.getProperty(
"arcwelder_firmware_compensation", "value"
)
min_arc_segment = int(
global_container_stack.getProperty("arcwelder_min_arc_segment", "value")
)
mm_per_arc_segment = global_container_stack.getProperty(
"arcwelder_mm_per_arc_segment", "value"
)
allow_3d_arcs = global_container_stack.getProperty(
"arcwelder_allow_3d_arcs", "value"
)
allow_dynamic_precision = global_container_stack.getProperty(
"arcwelder_allow_dynamic_precision", "value"
)
default_xyz_precision = int(
global_container_stack.getProperty(
"arcwelder_default_xyz_precision", "value"
)
)
default_e_precision = int(
global_container_stack.getProperty("arcwelder_default_e_precision", "value")
)
g90_influences_extruder = global_container_stack.getProperty(
"arcwelder_g90_influences_extruder", "value"
)
# If the scene does not have a gcode, do nothing
gcode_dict = getattr(scene, "gcode_dict", {})
if not gcode_dict: # this also checks for an empty dict
Logger.log("w", "Scene has no gcode to process")
return
dict_changed = False
layer_separator = ";ARCWELDERPLUGIN_GCODELIST_SEPARATOR\n"
processed_marker = ";ARCWELDERPROCESSED\n"
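        # The per-layer gcode chunks are joined with a unique separator so ArcWelder can
        # process a whole plate in one pass; the marker keeps already-welded plates from
        # being processed again on subsequent writes.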
for plate_id in gcode_dict:
gcode_list = gcode_dict[plate_id]
if len(gcode_list) < 2:
Logger.log("w", "Plate %s does not contain any layers", plate_id)
continue
if processed_marker in gcode_list[0]:
Logger.log("d", "Plate %s has already been processed", plate_id)
continue
if len(gcode_list) > 0:
# remove header from gcode, so we can put it back in front after processing
header = gcode_list.pop(0)
else:
header = ""
joined_gcode = layer_separator.join(gcode_list)
file_descriptor, temporary_path = tempfile.mkstemp()
Logger.log("d", "Using temporary file %s", temporary_path)
with os.fdopen(file_descriptor, "w", encoding="utf-8") as temporary_file:
temporary_file.write(joined_gcode)
command_arguments = [
self._arcwelder_path,
"-m=%f" % maximum_radius,
"-t=%f" % path_tolerance,
"-r=%f" % resolution
#"-x=%d" % default_xyz_precision,
#"-e=%d" % default_e_precision,
]
if firmware_compensation:
command_arguments.extend(
["-s=%f" % mm_per_arc_segment, "-a=%d" % min_arc_segment]
)
if allow_3d_arcs:
command_arguments.append("-z")
if allow_dynamic_precision:
command_arguments.append("-d")
if g90_influences_extruder:
command_arguments.append("-g")
command_arguments.append(temporary_path)
Logger.log(
"d",
"Running ArcWelder with the following options: %s" % command_arguments,
)
if Platform.isWindows():
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
startupinfo = None
process_output = subprocess.check_output(
command_arguments, startupinfo=startupinfo
).decode(locale.getpreferredencoding())
Logger.log("d", process_output)
with open(temporary_path, "r", encoding="utf-8") as temporary_file:
result_gcode = temporary_file.read()
os.remove(temporary_path)
gcode_list = result_gcode.split(layer_separator)
if header != "":
gcode_list.insert(0, header) # add header back in front
gcode_list[0] += processed_marker
gcode_dict[plate_id] = gcode_list
dict_changed = True
if dict_changed:
setattr(scene, "gcode_dict", gcode_dict)
|
the-stack_106_25120 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyEasybuildFramework(PythonPackage):
"""The core of EasyBuild, a software build and installation framework
for (scientific) software on HPC systems.
"""
homepage = 'http://hpcugent.github.io/easybuild/'
url = 'https://pypi.io/packages/source/e/easybuild-framework/easybuild-framework-3.1.2.tar.gz'
version('3.1.2', '283bc5f6bdcb90016b32986d52fd04a8')
    depends_on('python@2.6:2.8', type='run')
depends_on('py-setuptools', type=('build', 'run'))
depends_on('[email protected]:', when='@2.9:', type='run')
depends_on('py-vsc-install', type='run') # only required for tests (python -O -m test.framework.suite)
|
the-stack_106_25122 | import logging
import time
from typing import List, Optional
from ..kafka import KafkaProducer
from ..models.event_models import (
AcknowledgedWorkRequestEvent,
CompletedWorkRequestEvent,
HeartbeatEvent,
LeaderChangedEvent,
)
from ..models.subsystem_config_models import HeartbeatSubsystemConfiguration
from ..state_machine.models import TimeoutFlag
_logger = logging.getLogger(__name__)
class HeartbeatSubsystem:
def __init__(self, config: HeartbeatSubsystemConfiguration):
self._config: HeartbeatSubsystemConfiguration = config
self._count = 0
self._leader_id: Optional[str] = None
self._current_work_request_id: Optional[str] = None
self._heartbeat_timeout = TimeoutFlag(
self._config.heartbeat_delay_ms, "Time for heartbeat"
)
self._completed_work_request_ids: List[str] = []
def _debug(self, msg: str):
return
_logger.debug(f"{msg}")
def _broadcast_heartbeat(self, reason: str):
self._debug(
f"{str(self._config.worker_id)} Broadcasting heartbeat sequence {self._count} because {reason}"
)
self._count = (self._count + 1) % 1000000
self._producer.produce(
HeartbeatEvent(
worker_id=str(self._config.worker_id),
current_work_request_id=self._current_work_request_id,
completed_work_request_ids=self._completed_work_request_ids,
count=self._count,
leader_id=self._leader_id,
)
)
self._heartbeat_timeout.reset()
def _should_exit(self):
return self._config.should_exit_flag.get_value()
def _configure_producer(self) -> KafkaProducer:
return KafkaProducer(
broker=self._config.get_kafka_producer_broker(),
topic=self._config.heartbeat_topic,
)
def _get_leader_changed_event(self):
latest: Optional[LeaderChangedEvent] = None
while not self._config.leader_changed_event_queue.empty():
latest = self._config.leader_changed_event_queue.get()
if latest is None:
return
self._leader_id = latest.leader_id
leader_id_str = self._leader_id if self._leader_id is not None else "None"
self._debug(f"Consumed leader changed event. New leader is {leader_id_str} ")
def _get_work_request_event(self):
# TODO: This smells more than I expected. I'm not sure if this would be better suited for another
# subsystem entirely.
consuming = True
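        # Drain the acknowledgement and completion queues until a full pass makes no
        # progress; every state change is broadcast immediately via a heartbeat.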
while consuming:
consuming = False
if (
self._current_work_request_id is None
and not self._config.acknowledged_work_request_event_queue.empty()
):
acknowledged_work_request_event: AcknowledgedWorkRequestEvent = (
self._config.acknowledged_work_request_event_queue.get()
)
if (
acknowledged_work_request_event.work_request_id
not in self._completed_work_request_ids
):
self._current_work_request_id = (
acknowledged_work_request_event.work_request_id
)
self._broadcast_heartbeat(
f"We are acknowledging the assignment of work request {self._current_work_request_id}."
)
consuming = True
if (
self._current_work_request_id is not None
and not self._config.completed_work_request_event_queue.empty()
):
completed_work_request_event: CompletedWorkRequestEvent = (
self._config.completed_work_request_event_queue.get()
)
self._current_work_request_id = (
None
if self._current_work_request_id
== completed_work_request_event.work_request_id
else self._current_work_request_id
)
self._completed_work_request_ids.append(
completed_work_request_event.work_request_id
)
self._broadcast_heartbeat(
f"We are completing work request {completed_work_request_event.work_request_id}."
)
consuming = True
def _unsafe_run(self):
self._producer = self._configure_producer()
while not self._should_exit():
self._get_leader_changed_event()
self._get_work_request_event()
if self._heartbeat_timeout.get_value():
self._broadcast_heartbeat(
"We need to let the leader know we're still alive."
)
def start(self):
self._config.started_flag.set_value()
try:
self._unsafe_run()
except Exception as ex:
self._config.exception_flag.set_value(context={"ex": ex})
_logger.exception("Unhandled exception in heartbeat subsystem.")
finally:
self._config.exited_flag.set_value()
|
the-stack_106_25123 | # -*- coding: utf-8 -*-
"""
Set of functions to make diff from Scan results.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django.utils.encoding import force_unicode
from ralph.discovery.models import DeviceType
from ralph.scan.data import UNIQUE_FIELDS_FOR_MERGER
RAW_DEVICE_TYPES = [
choice_name
for _, choice_name in DeviceType()
]
def _sort_dict_by_multiple_fields_values(keynames):
def getit(adict):
composite = []
for key in keynames:
if key in adict:
composite.append(adict[key])
return composite
return getit
def sort_results(data, ignored_fields=set(['device'])):
"""
Sort results for all components and all plugins.
"""
for component, results in data.iteritems():
if component not in UNIQUE_FIELDS_FOR_MERGER:
continue
for sources, plugins_data in results.iteritems():
keynames = set()
for fields_group in UNIQUE_FIELDS_FOR_MERGER[component]:
for field in fields_group:
if field in ignored_fields:
continue
keynames.add(field)
if keynames:
plugin_data = sorted(
plugins_data,
key=_sort_dict_by_multiple_fields_values(keynames),
)
data[component][sources] = plugin_data
def _get_matched_row(rows, lookup):
"""
Return row that matches lookup fields.
"""
for index, row in enumerate(rows):
matched = True
for field, value in lookup.items():
if force_unicode(row.get(field, '')).strip() != value:
matched = False
break
if matched:
return index, row
return None, None
def _compare_dicts(
ldict,
rdict,
ignored_fields=set(['device', 'index', 'model_name'])
):
"""
Compare two dicts and return comparison status (match), diff and set of
keys that are available in compared dicts.
"""
match = True
diff = {}
keys = (set(ldict.keys()) | set(rdict.keys())) - ignored_fields
for key in keys:
lvalue = force_unicode(ldict.get(key, '')).strip()
rvalue = force_unicode(rdict.get(key, '')).strip()
if lvalue and not rvalue:
match = False
diff[key] = {
'status': b'-',
'left_value': lvalue,
'right_value': '',
}
elif not lvalue and rvalue:
match = False
diff[key] = {
'status': b'+',
'left_value': '',
'right_value': rvalue,
}
else:
if lvalue == rvalue:
diff[key] = {
'status': b'',
'left_value': lvalue,
'right_value': rvalue,
}
else:
match = False
diff[key] = {
'status': b'?',
'left_value': lvalue,
'right_value': rvalue,
}
return match, diff, keys
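# A minimal illustration of _compare_dicts (hypothetical field values, not part
# of the original module): each shared key gets a status of b'' (equal),
# b'?' (changed), b'+' (only in the right dict) or b'-' (only in the left dict).
#
#   match, diff, keys = _compare_dicts(
#       {'serial_number': 'SN1', 'size': '100'},
#       {'serial_number': 'SN1', 'size': '200'},
#   )
#   # match -> False
#   # diff['size'] -> {'status': b'?', 'left_value': u'100', 'right_value': u'200'}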
def _compare_lists(*args):
"""
    Compare two or more lists. Return True if they are all equal.
"""
if not args:
return True
compared_item = set(args[0])
for item in args[1:]:
if compared_item != set(item):
return False
return True
def _compare_strings(*args):
"""
    Compare two or more strings. Return True if they are all equal.
"""
if not args:
return True
compared_item = force_unicode(args[0]).strip()
for item in args[1:]:
if compared_item != force_unicode(item).strip():
return False
return True
def _find_database_key(results):
"""
    The same data from different plugins is connected together in results
    under the same key. This key is a tuple, e.g. ('database', 'puppet').
    This function returns the tuple that contains the "database" element.
"""
for sources in results.iterkeys():
if 'database' in sources:
return sources
def _sanitize_component_values(values=[]):
result = []
for value in values:
for device_type in RAW_DEVICE_TYPES:
if '(%s)' % device_type in value:
value = value.replace('(%s)' % device_type, '').strip()
result.append(value)
return result
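# Illustrative behaviour (the exact suffixes depend on the DeviceType choices
# defined in ralph.discovery.models, so the value below is an assumption):
#   _sanitize_component_values(['IBM System x3550 (rack server)'])
#   # -> ['IBM System x3550']  i.e. the parenthesised device-type label is stripped.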
def diff_results(data, ignored_fields=set(['device', 'model_name'])):
"""
Make diff from Scan results.
"""
diffs = {}
for component, results in data.iteritems():
if component in ('subdevices', 'connections'):
            continue  # skipped because these are not components...
db_results_key = _find_database_key(results)
if not db_results_key:
continue # incomplete data
diff_result = {
'is_equal': False,
'meta': {'no_value': []},
}
        # it would be better to define a global dict to look up ignored fields
        # by component, but for the time being we only want to make an exception
        # for the 'parts' component and the '_compare_dicts' function
        # - hence 'ignored_fields_for_compare_dicts' below
if component == 'parts':
ignored_fields_for_compare_dicts = (
set(['device', 'index', 'model_name', 'name'])
)
else:
# these should be the same as defaults in '_compare_dicts'
ignored_fields_for_compare_dicts = (
set(['device', 'index', 'model_name'])
)
if component not in UNIQUE_FIELDS_FOR_MERGER:
if isinstance(results[db_results_key], list):
diff_result.update({
'is_equal': _compare_lists(*tuple(results.values())),
'type': 'lists',
})
else:
component_values = results.values()
if component == 'model_name':
component_values = _sanitize_component_values(
component_values
)
elif (component == 'type' and
set(component_values) == set(['unknown'])):
diff_result['meta']['no_value'].append(component)
diff_result.update({
'is_equal': _compare_strings(*tuple(component_values)),
'type': 'strings',
})
else:
diff_result.update({
'type': 'dicts',
'diff': [],
})
database = results.get(db_results_key, [])
merged = results.get(('merged',), [])
database_parsed_rows = set()
merged_parsed_rows = set()
headers = set()
add_items_count = 0
remove_items_count = 0
change_items_count = 0
for index, items in enumerate(database):
for field_group in UNIQUE_FIELDS_FOR_MERGER[component]:
                    # some rows could be returned with the same index by
                    # different lookups
if index in database_parsed_rows:
break
lookup = {}
for field in field_group:
if field in ignored_fields:
continue
field_db_value = force_unicode(
items.get(field, '')
).strip()
if not field_db_value:
continue
lookup[field] = field_db_value
if lookup:
matched_index, matched_row = _get_matched_row(
merged,
lookup,
)
if matched_row:
database_parsed_rows.add(index)
merged_parsed_rows.add(matched_index)
status, row_diff, rows_keys = _compare_dicts(
items,
matched_row,
ignored_fields=ignored_fields_for_compare_dicts,
)
diff_result['diff'].append({
'status': b'?' if not status else b'',
'items': items,
'dict_diff': row_diff,
})
if not status:
change_items_count += 1
headers |= rows_keys
if index not in database_parsed_rows:
diff_result['diff'].append({
'status': b'-',
'items': items,
'dict_diff': None,
})
remove_items_count += 1
headers |= set(items.keys())
for index, items in enumerate(merged):
if index not in merged_parsed_rows:
diff_result['diff'].append({
'status': b'+',
'items': items,
'dict_diff': None,
})
add_items_count += 1
headers |= set(items.keys())
headers -= ignored_fields
headers -= {'index'}
diff_result.update({
'is_equal': all((
add_items_count == 0,
remove_items_count == 0,
change_items_count == 0,
)),
'meta': {
'add_items_count': add_items_count,
'remove_items_count': remove_items_count,
'change_items_count': change_items_count,
'headers': headers,
},
})
diffs[component] = diff_result
return diffs
|
the-stack_106_25125 | from typing import List
from eventz.messages import Event
from eventz_aws.types import EventPublisherProtocol
class EventPublisherDummy(EventPublisherProtocol):
def __init__(self):
self.events: List[Event] = []
def publish(
self,
connection_id: str,
route: str,
msgid: str,
dialog: str,
seq: int,
event: Event,
) -> None:
self.events.append(event)
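# Usage sketch for tests (hypothetical: assumes some Event subclass, here called
# SomethingHappened, exists in the surrounding project):
#
#   publisher = EventPublisherDummy()
#   event = SomethingHappened(...)
#   publisher.publish(
#       connection_id="conn-1", route="default", msgid="msg-1",
#       dialog="d-1", seq=1, event=event,
#   )
#   assert publisher.events == [event]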
|
the-stack_106_25126 | from flask import Flask, render_template, request, redirect, url_for, flash, jsonify
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Restaurant, MenuItem
from sqlalchemy.pool import SingletonThreadPool
from flask import session as login_session
import random, string
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
from flask import make_response
import requests
CLIENT_ID = json.loads(
open('client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Restaurant Menu Application"
app = Flask(__name__)
engine = create_engine('sqlite:///restaurantmenu.db?check_same_thread=False',poolclass=SingletonThreadPool)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
@app.route('/restaurants/JSON')
def restaurantsJSON():
restaurant_list = session.query(Restaurant).all()
return jsonify(Restaurant=[i.serialize for i in restaurant_list])
#Making an API endpoint (GET Request)
@app.route('/restaurants/<int:restaurant_id>/menu/JSON')
def restaurantMenuJSON(restaurant_id):
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
items = session.query(MenuItem).filter_by(restaurant_id=restaurant_id).all()
return jsonify(MenuItems=[i.serialize for i in items])
# ADD JSON ENDPOINT HERE
@app.route('/restaurants/<int:restaurant_id>/menu/<int:menu_id>/JSON')
def menuItemJSON(restaurant_id, menu_id):
menuItem = session.query(MenuItem).filter_by(id=menu_id).one()
return jsonify(MenuItem=menuItem.serialize)
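# The JSON endpoints above can be exercised once the app is running, e.g.
# (hypothetical IDs, default Flask port 5000):
#   curl http://localhost:5000/restaurants/JSON
#   curl http://localhost:5000/restaurants/1/menu/JSON
#   curl http://localhost:5000/restaurants/1/menu/2/JSON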
@app.route('/login/')
def showLogin():
state = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(32))
login_session['state'] = state
return render_template('login.html', STATE=state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
print("Valid state")
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
print(oauth_flow)
print("Valid oauth_flow")
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
print("Got credentials")
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print("Token's client ID does not match app's.")
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps('Current user is already connected.'),
200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("you are now logged in as %s" % login_session['username'])
print("done!")
return output
@app.route('/gdisconnect')
def gdisconnect():
access_token = login_session.get('access_token')
if access_token is None:
print('Access Token is None')
response = make_response(json.dumps('Current user not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
print('In gdisconnect access token is %s', access_token)
print('User name is: ')
print(login_session['username'])
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % login_session['access_token']
h = httplib2.Http()
result = h.request(url, 'GET')[0]
print('result is ')
print(result)
if result['status'] == '200':
del login_session['access_token']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['picture']
response = make_response(json.dumps('Successfully disconnected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
else:
        response = make_response(json.dumps('Failed to revoke token for given user.'), 400)
response.headers['Content-Type'] = 'application/json'
return response
@app.route('/')
@app.route('/restaurants/')
def restaurants():
restaurant_list = session.query(Restaurant).all()
return render_template('restaurants_list.html',restaurants=restaurant_list)
@app.route('/restaurants/new/', methods=['GET','POST'])
def newRestaurant():
if 'username' not in login_session:
return redirect('/login')
if request.method == 'POST':
newResto = Restaurant(name=request.form['name'])
session.add(newResto)
session.commit()
flash("New restaurant created!")
return redirect(url_for('restaurants'))
else:
return render_template('newrestaurant.html')
@app.route('/restaurants/<int:restaurant_id>/edit/', methods=['GET','POST'])
def editRestaurant(restaurant_id):
editedItem = session.query(Restaurant).filter_by(id=restaurant_id).one()
if 'username' not in login_session:
return redirect('/login')
if request.method == 'POST':
if request.form['name']:
editedItem.name = request.form['name']
session.add(editedItem)
session.commit()
flash("Restaurant updated!")
return redirect(url_for('restaurants'))
else:
return render_template('editrestaurant.html', restaurant=editedItem)
@app.route('/restaurants/<int:restaurant_id>/delete/', methods=['GET','POST'])
def deleteRestaurant(restaurant_id):
itemToDelete = session.query(Restaurant).filter_by(id=restaurant_id).one()
if 'username' not in login_session:
return redirect('/login')
if request.method == 'POST':
session.delete(itemToDelete)
session.commit()
flash("Restaurant deleted!")
return redirect(url_for('restaurants'))
else:
return render_template('deleterestaurant.html', restaurant=itemToDelete)
@app.route('/restaurants/<int:restaurant_id>/')
def restaurantMenu(restaurant_id):
restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
items = session.query(MenuItem).filter_by(restaurant_id=restaurant.id)
return render_template('menu.html',restaurant=restaurant, items=items)
# Task 1: Create route for newMenuItem function here
@app.route('/restaurant/<int:restaurant_id>/new/', methods=['GET','POST'])
def newMenuItem(restaurant_id):
if 'username' not in login_session:
return redirect('/login')
if request.method == 'POST':
#name price description course
newItem = MenuItem(name=request.form['name'],description=request.form['description'],price=request.form['price'],course=request.form['course'], restaurant_id=restaurant_id)
session.add(newItem)
session.commit()
flash("New menu item created!")
return redirect(url_for('restaurantMenu',restaurant_id=restaurant_id))
else:
return render_template('newmenuitem.html',restaurant_id=restaurant_id)
# Task 2: Create route for editMenuItem function here
@app.route('/restaurant/<int:restaurant_id>/<int:menu_id>/edit/', methods=['GET','POST'])
def editMenuItem(restaurant_id, menu_id):
editedItem = session.query(MenuItem).filter_by(id=menu_id).one()
if 'username' not in login_session:
return redirect('/login')
if request.method == 'POST':
if request.form['name']:
editedItem.name = request.form['name']
session.add(editedItem)
session.commit()
flash("Menu item edited!")
return redirect(url_for('restaurantMenu',restaurant_id=restaurant_id))
else:
return render_template('editmenuitem.html', restaurant_id=restaurant_id, menu_id= menu_id, item=editedItem)
# Task 3: Create a route for deleteMenuItem function here
@app.route('/restaurant/<int:restaurant_id>/<int:menu_id>/delete/', methods=['GET','POST'])
def deleteMenuItem(restaurant_id, menu_id):
itemToDelete = session.query(MenuItem).filter_by(id = menu_id).one()
if 'username' not in login_session:
return redirect('/login')
if request.method == 'POST':
session.delete(itemToDelete)
session.commit()
flash("Menu item deleted!")
return redirect(url_for('restaurantMenu', restaurant_id = restaurant_id))
else:
return render_template('deletemenuitem.html', item = itemToDelete)
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host='0.0.0.0', port=5000)
|
the-stack_106_25127 | from flask import Flask, request, send_from_directory
import os
import argparse
from tqdm import tqdm
from flask import jsonify
import csv
import numpy
from bling_fire_tokenizer import BlingFireTokenizer
import yaml
app = Flask(__name__, static_url_path='')
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
#
# data loading & prep
#
with open(os.environ.get("RUN_CONFIG"), 'r') as ymlfile:
    # yaml.load without an explicit Loader is deprecated/unsafe; safe_load is sufficient here.
    yaml_cfg = yaml.safe_load(ymlfile)
runs = yaml_cfg["runs"]
max_doc_char_length = 100_000
def load_qrels(path):
with open(path,'r') as f:
qids_to_relevant_passageids = {}
for l in f:
try:
l = l.strip().split()
qid = l[0]
if l[3] != "0":
if qid not in qids_to_relevant_passageids:
qids_to_relevant_passageids[qid] = []
qids_to_relevant_passageids[qid].append(l[2].strip())
except:
raise IOError('\"%s\" is not valid format' % l)
return qids_to_relevant_passageids
qrels = []
clusters = []
collection = []
queries = []
queries_with_stats = []
secondary_model = []
secondary_qd = []
collection_cache = {}
queries_cache = {}
for run_id, run in enumerate(runs):
qrels.append(load_qrels(run["qrels"]))
with open(run["cluster-stats"],"r") as csv_file:
cluster_csv = csv.DictReader(csv_file)
_clusters = {}
for row in cluster_csv:
_clusters[row["cluster"]] = dict(row)
_clusters[row["cluster"]]["queries"] = []
with open(run["queries"],"r") as csv_file:
query_csv = csv.DictReader(csv_file)
_queries = {}
_queries_with_stats = {}
for row in query_csv:
_clusters[row["cluster"]]["queries"].append(dict(row))
_queries[row["qid"]] = row["text"]
_queries_with_stats[row["qid"]] = dict(row)
queries.append(_queries)
queries_with_stats.append(_queries_with_stats)
clusters.append(_clusters)
if run["collection"] in collection_cache:
collection.append(collection_cache[run["collection"]])
else:
_collection = {} # int id -> full line dictionary
with open(run["collection"],"r",encoding="utf8") as collection_file:
for line in tqdm(collection_file):
ls = line.split("\t") # id<\t>text ....
_id = ls[1]
_collection[_id] = ls[3].rstrip()[:max_doc_char_length]
collection_cache[run["collection"]]= _collection
collection.append(_collection)
secondary = numpy.load(run["secondary-output"], allow_pickle = True)
secondary_model.append(secondary.get("model_data")[()])
secondary_qd.append(secondary.get("qd_data")[()])
#filter clusters according to the queries that are in the secondary output
qids_in_secondary_data = secondary_qd[run_id].keys()
for cluster_id in clusters[run_id].keys():
new_query_list = []
for qidx, query in enumerate(clusters[run_id][cluster_id]["queries"]):
if query["qid"] in qids_in_secondary_data:
new_query_list.append(query)
clusters[run_id][cluster_id]["queries"] = new_query_list
queries_to_remove = []
for qid in queries_with_stats[run_id].keys():
if not qid in qids_in_secondary_data:
queries_to_remove.append(qid)
for qid_remove in queries_to_remove:
queries_with_stats[run_id].pop(qid_remove)
if run["run-info"]["score_type"]=="tk" or run["run-info"]["score_type"]=="fk":
run["run-info"]["model_weights_log_len_mix"] = secondary.get("model_data")[()]["dense_comb_weight"][0].tolist()
import gc
gc.collect()
#
# api endpoints
#
@app.route('/dist/<path:path>')
def send_dist(path):
return send_from_directory('dist', path)
@app.route("/")
def main():
return send_from_directory('', 'index.html')
@app.route("/run-info")
def run_info():
return jsonify(runs=[r["run-info"] for r in runs])
@app.route("/evaluated-queries/<run>")
def all_queries(run):
return jsonify(clusters=clusters[int(run)])
@app.route("/query/<run>/<qid>")
def query(qid,run):
run = int(run)
documents = []
for doc in secondary_qd[run][qid]:
documents.append(get_document_info(runs[run]["run-info"]["score_type"],qid,doc,secondary_qd[run][qid][doc],run))
return jsonify(documents=documents)
#
# helper methods
#
tokenizer = BlingFireTokenizer()
def analyze_weighted_param_1D(name,values, param_weight,bias=None,last_x=5):
#print(name, ": value * weight + bias")
rolling_sum = 0
rolling_sum_after_x = 0
kernels = {}
after_x = len(values) - last_x
for i,val in enumerate(values):
param = param_weight[i]
if i < after_x:
kernels[i] = (float(val),float(param))
#print("["+str(i)+"]", str(val) + " * "+str(param) + " = "+ str(val*param))
rolling_sum += val*param
if i >= after_x:
rolling_sum_after_x += val*param
#if bias != None:
#print("Sum:",rolling_sum + bias)
#print("Sum(>="+str(after_x)+")",rolling_sum_after_x + bias)
#else:
#print("Sum:",rolling_sum)
#print("Sum(>="+str(after_x)+")",rolling_sum_after_x)
#print("-----------")
    if bias is not None:
rolling_sum = rolling_sum + bias
rolling_sum_after_x = rolling_sum_after_x + bias
return (kernels, float(rolling_sum),float(rolling_sum_after_x))
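# Worked example (hypothetical numbers): with values=[1.0, 2.0, 3.0],
# param_weight=[0.5, 0.5, 0.5], bias=None and last_x=1, after_x is 2 and the
# call returns ({0: (1.0, 0.5), 1: (2.0, 0.5)}, 3.0, 1.5), i.e. the per-kernel
# (value, weight) pairs for everything before the last last_x entries, the full
# weighted sum, and the weighted sum over only the last last_x entries.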
def get_document_info(score_type,qid,did,secondary_info,run):
document_info = {"id":float(did),"score":float(secondary_info["score"]),"judged_relevant": did in qrels[run][qid]}
if score_type == "tk" or score_type == "fk":
document_info["val_log"] = analyze_weighted_param_1D("log-kernels",secondary_info["per_kernel"],secondary_model[run]["dense_weight"][0],last_x=runs[run]["run-info"]["rest-kernels-last"])
document_info["val_len"] = analyze_weighted_param_1D("len-norm-kernels",secondary_info["per_kernel_mean"],secondary_model[run]["dense_mean_weight"][0],last_x=runs[run]["run-info"]["rest-kernels-last"])
if score_type == "knrm":
document_info["val_log"] = analyze_weighted_param_1D("log-kernels",secondary_info["per_kernel"],secondary_model[run]["kernel_weight"][0],last_x=runs[run]["run-info"]["rest-kernels-last"])
document_info["tokenized_query"] = tokenizer.tokenize(queries[run][qid])
document_info["tokenized_document"] = tokenizer.tokenize(collection[run][did])
#matches = []
matches_per_kernel = []
matches_per_kernel_strongest = []
original_mm = numpy.transpose(secondary_info["cosine_matrix_masked"][:len(document_info["tokenized_query"]),:len(document_info["tokenized_document"])]).astype('float64')
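    # KNRM/TK-style Gaussian kernel transform: for each query-term/doc-term cosine
    # similarity s and kernel mean mu_k, compute exp(-(s - mu_k)^2 / (2 * sigma^2))
    # with sigma fixed at 0.1, then take the maximum over the query dimension per kernel.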
kernel_transformed = numpy.exp(- pow(numpy.expand_dims(original_mm,2) - numpy.array(runs[run]["run-info"]["kernels_mus"]), 2) / (2 * pow(0.1, 2)))
kernel_transformed_max_query_per_kernel = numpy.max(kernel_transformed,axis=1)
#for t,token in enumerate(document_info["tokenized_document"]):
# #largest_sim = secondary_info["cosine_matrix_masked"][max_query_id_per_doc[t]][t]
#
# kernel_results = [0]*len(runs["run-info"]["kernels_mus"])
# #matches_per_doc = []
# for i,m in enumerate(runs["run-info"]["kernels_mus"]):
# for q in range(secondary_info["cosine_matrix_masked"].shape[0]):
# kernel_results[i] = float(max(kernel_results[i],(kernel_transformed[q][t][i])))
# #matches_per_doc.append(float(secondary_info["cosine_matrix_masked"][q][t]))
#
# #matches.append(matches_per_doc)
# matches_per_kernel.append(kernel_results)
#
# strongest_kernel = numpy.argmax(numpy.array(kernel_results),axis=0).tolist()
# matches_per_kernel_strongest.append(strongest_kernel)
#print(secondary_info["cosine_matrix_masked"].dtype)
#print(original_mm.dtype)
#print(kernel_transformed.shape)
#print(kernel_transformed.dtype)
#print(original_mm)
#print(numpy.around(original_mm,3).dtype)
#print(numpy.around(original_mm,3).tolist())
#print(numpy.around(kernel_transformed,3).dtype)
document_info["matches"] = numpy.around(original_mm,3).tolist()
document_info["matches_per_kernel"] = numpy.around(kernel_transformed,3).tolist()
document_info["matches_per_kernel_max"] = numpy.around(kernel_transformed_max_query_per_kernel,3).tolist()
#for q in range(len(document_info["tokenized_query"])):
# mq = []
# for d in range(len(document_info["tokenized_document"])):
# mq.append(float(secondary_info["cosine_matrix_masked"][q][d]))
# matches.append(mq)
#document_info["matches"] = matches
    return document_info
 |
the-stack_106_25132 | # -*- coding: utf-8 -*-
from pandas_ta.utils import get_offset, pascals_triangle, verify_series, weights
def pwma(close, length=None, asc=None, offset=None, **kwargs):
"""Indicator: Pascals Weighted Moving Average (PWMA)"""
# Validate Arguments
length = int(length) if length and length > 0 else 10
asc = asc if asc else True
close = verify_series(close, length)
offset = get_offset(offset)
if close is None: return
# Calculate Result
triangle = pascals_triangle(n=length - 1, weighted=True)
pwma = close.rolling(length, min_periods=length).apply(weights(triangle), raw=True)
# Offset
if offset != 0:
pwma = pwma.shift(offset)
# Handle fills
if "fillna" in kwargs:
pwma.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
pwma.fillna(method=kwargs["fill_method"], inplace=True)
# Name & Category
pwma.name = f"PWMA_{length}"
pwma.category = "overlap"
return pwma
pwma.__doc__ = \
"""Pascal's Weighted Moving Average (PWMA)
Pascal's Weighted Moving Average is similar to a symmetric triangular window
except PWMA's weights are based on Pascal's Triangle.
Source: Kevin Johnson
Calculation:
Default Inputs:
length=10
def weights(w):
def _compute(x):
            return np.dot(w, x)
return _compute
    triangle = utils.pascals_triangle(n=length - 1, weighted=True)
    PWMA = close.rolling(length).apply(weights(triangle), raw=True)
Args:
close (pd.Series): Series of 'close's
    length (int): Its period. Default: 10
asc (bool): Recent values weigh more. Default: True
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
|
the-stack_106_25133 | from typing import Optional, Union
import torch
from catalyst.metrics.functional._classification import precision_recall_fbeta_support
def precision(
outputs: torch.Tensor,
targets: torch.Tensor,
argmax_dim: int = -1,
eps: float = 1e-7,
num_classes: Optional[int] = None,
) -> Union[float, torch.Tensor]:
"""
Multiclass precision score.
Args:
outputs: estimated targets as predicted by a model
with shape [bs; ..., (num_classes or 1)]
targets: ground truth (correct) target values
with shape [bs; ..., 1]
argmax_dim: int, that specifies dimension for argmax transformation
in case of scores/probabilities in ``outputs``
eps: float. Epsilon to avoid zero division.
        num_classes: int, that specifies number of classes if it is known
Returns:
Tensor: precision for every class
Examples:
.. code-block:: python
import torch
from catalyst import metrics
metrics.precision(
outputs=torch.tensor([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
]),
targets=torch.tensor([0, 1, 2]),
)
# tensor([1., 1., 1.])
.. code-block:: python
import torch
from catalyst import metrics
metrics.precision(
outputs=torch.tensor([[0, 0, 1, 1, 0, 1, 0, 1]]),
targets=torch.tensor([[0, 1, 0, 1, 0, 0, 1, 1]]),
)
        # tensor([0.5000, 0.5000])
"""
    precision_score, _, _, _ = precision_recall_fbeta_support(
outputs=outputs, targets=targets, argmax_dim=argmax_dim, eps=eps, num_classes=num_classes
)
return precision_score
__all__ = ["precision"]
|
the-stack_106_25135 | import torch
import torchvision.transforms as T
class Config:
# network settings
backbone = 'fmobile' # [resnet, fmobile]
metric = 'arcface' # [cosface, arcface]
embedding_size = 512
drop_ratio = 0.5
# data preprocess
input_shape = [1, 128, 128]
train_transform = T.Compose([
T.Grayscale(),
T.RandomHorizontalFlip(),
T.Resize((144, 144)),
T.RandomCrop(input_shape[1:]),
T.ToTensor(),
T.Normalize(mean=[0.5], std=[0.5]),
])
test_transform = T.Compose([
T.Grayscale(),
T.Resize(input_shape[1:]),
T.ToTensor(),
T.Normalize(mean=[0.5], std=[0.5]),
])
# dataset
train_root = '/data/CASIA-WebFace'
test_root = "/data/lfw-align-128"
test_list = "/data/lfw_test_pair.txt"
# training settings
checkpoints = "checkpoints"
restore = False
restore_model = ""
test_model = "18.pth"
train_batch_size = 64
test_batch_size = 60
epoch = 150
optimizer = 'sgd' # ['sgd', 'adam']
lr = 1e-1
lr_step = 10
lr_decay = 0.95
weight_decay = 5e-4
loss = 'focal_loss' # ['focal_loss', 'cross_entropy']
device = 'cuda' if torch.cuda.is_available() else 'cpu'
pin_memory = True # if memory is large, set it True to speed up a bit
num_workers = 4 # dataloader
config = Config()
 |
the-stack_106_25141 | """
MIT License
Copyright (c) 2021 Ashwin Vallaban
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#Messaging Bot
#This project was made for FCRIT HACK-X-TRONICS 2021
#6 July 2021
#
#This program is not ready for production use
#
#Telegram Bot Token was removed for security reasons
#Admin Authorization not added
import sys
import time
import telepot
from telepot.namedtuple import ReplyKeyboardMarkup, KeyboardButton
import json
import re
import os
import requests
#Telegram Bot Token-------------------------------------
TOKEN = "<>" #Token removed
#-------------------------------------------------------
EXTC_MENU = 'EXTC'
COMP_MENU = 'COMPUTER'
IT_MENU = 'IT'
MECH_MENU = 'MECHANICAL'
ALL_1 = 'All 1st year'
ALL_2 = 'All 2nd year'
ALL_3 = 'All 3rd year'
ALL_4 = 'All 4th year'
ALL_YR = 'Send to everyone'
ALL_1_DEPT = '1st year '
ALL_2_DEPT = '2nd year '
ALL_3_DEPT = '3rd year '
ALL_4_DEPT = '4th year '
ALL_YR_DEPT = 'Send to all years '
BACK_TO_MAINMENU = 'Back to main menu'
DONE_SENDING = 'Done'
#============ Regex =================
ALL_DEPT_RX = '.+'
ALL_DEPT_1ST_RX = '1.+'
ALL_DEPT_2ND_RX = '2.+'
ALL_DEPT_3RD_RX = '3.+'
ALL_DEPT_4TH_RX = '4.+'
ALL_YEAR_EXTC = '[1-4]EXTC'
ALL_YEAR_COMP = '[1-4]COMP'
ALL_YEAR_IT = '[1-4]IT'
ALL_YEAR_MECH = '[1-4]MECH'
EXTC_DEPT_RX = 'EXTC'
MECH_DEPT_RX = 'MECH'
IT_DEPT_RX = 'IT'
COMP_DEPT_RX = 'COMP'
#====================================
#func reads the txt file and returns its contents
#filename should include the extension
def file_read(filename, filemode):
print('file_read()')
# Open a file: file
file = open(filename,mode=filemode)
# read all lines at once
itemdata = file.read()
# close the file
file.close()
print('File Read')
return itemdata
#=============================================== Main Menu ==========================================================
def main_menu(chat_id,done):
print('main_menu()')
temp_exec_cmd = """bot.sendMessage(chat_id, 'Main Menu',
reply_markup=ReplyKeyboardMarkup(
keyboard=["""
done_btn = """[KeyboardButton(text=DONE_SENDING)],"""
temp_exec_cmd2 = """[KeyboardButton(text=EXTC_MENU),KeyboardButton(text=COMP_MENU)],
[KeyboardButton(text=IT_MENU),KeyboardButton(text=MECH_MENU)],
[KeyboardButton(text=ALL_1),KeyboardButton(text=ALL_2),KeyboardButton(text=ALL_3),KeyboardButton(text=ALL_4)],
[KeyboardButton(text=ALL_YR)]
]
)
)"""
if done == 0:
exec(temp_exec_cmd + temp_exec_cmd2)
else:
exec(temp_exec_cmd + done_btn + temp_exec_cmd2)
print('Executed custom keyboard')
#=============================================== Sub-main Menu ==========================================================
def sub_main_menu(chat_id,DEPT_NAME,done):
print('sub_main_menu()')
temp_exec_cmd = """bot.sendMessage(chat_id, 'Sub-Main Menu',
reply_markup=ReplyKeyboardMarkup(
keyboard=["""
done_btn = """[KeyboardButton(text=DONE_SENDING + ' ' + DEPT_NAME)],"""
temp_exec_cmd2 = """[KeyboardButton(text=ALL_1_DEPT + DEPT_NAME),KeyboardButton(text=ALL_2_DEPT + DEPT_NAME),KeyboardButton(text=ALL_3_DEPT + DEPT_NAME),KeyboardButton(text=ALL_4_DEPT + DEPT_NAME)],
[KeyboardButton(text=ALL_YR_DEPT + '[' + DEPT_NAME + ']')],
[KeyboardButton(text=BACK_TO_MAINMENU)]
]
)
)"""
if done == 0:
exec(temp_exec_cmd + temp_exec_cmd2)
else:
exec(temp_exec_cmd + done_btn + temp_exec_cmd2)
print('Executed custom keyboard')
#############################
def on_callback_query(msg):
query_id, from_id, query_data = telepot.glance(msg, flavor='callback_query')
print('Callback Query:', query_id, from_id, query_data)
#call delete func!
try:
inline_delete(re.findall(" .+", msg['data'])[0].strip(),from_id)
bot.sendMessage(from_id, 'deleted message!')
except:
bot.sendMessage(from_id, 'Message Not Found')
def inline_key_send(key_msg,callback_msg,chat_id):
from telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton
keyboard = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text=key_msg, callback_data=callback_msg)],
])
bot.sendMessage(chat_id, '_', reply_markup=keyboard)
#############################
#this func clears every stored message (and any saved files) for this admin
def db_clear(chat_id, done):
print('db_clear()')
itemdata = file_read('Admin/' + str(chat_id) + '/chat.txt','r')
json_dictionary = json.loads(itemdata)
v = ''
for k,v in json_dictionary.items():
if re.search("^#.+",v):
#delete files from local folder
print('deleting file: ' + re.findall("[A-z0-9].+", v)[0].strip())
os.remove('Admin/' + str(chat_id) + '/' + re.findall("[A-z0-9].+", v)[0].strip())
print('files deleted')
# Open a file: file
with open('Admin/' + str(chat_id) + '/chat.txt','w') as file:
file.write('{}') #clear chat db
# close the file
file.close()
bot.sendMessage(chat_id, 'Done! Cleared all messages!')
print('db cleared')
if done == 'Done':
main_menu(chat_id,0)
else:
sub_main_menu(chat_id,done,0)
print('menu updated')
return
#for deleting messages from inline keyboard
def inline_delete(message_id,chat_id):
print('inline_delete()')
itemdata = file_read('Admin/' + str(chat_id) + '/chat.txt','r')
json_dictionary = json.loads(itemdata)
if re.search("^#.+",json_dictionary[message_id]):
os.remove('Admin/' + str(chat_id) + '/' + re.findall("[A-z0-9].+", json_dictionary[message_id])[0].strip())
print('File deleted')
else:
print('Not a file')
del json_dictionary[message_id]
print('deleted')
with open('Admin/' + str(chat_id) + '/chat.txt','w') as file:
file.write(json.dumps(json_dictionary)) #update db
# close the file
file.close()
def chk_if_send_cmd(msg,chat_id):
if msg == ALL_1:
print('True')
send_messages(ALL_DEPT_1ST_RX,chat_id)
return True
elif msg == ALL_2:
print('True')
send_messages(ALL_DEPT_2ND_RX,chat_id)
return True
elif msg == ALL_3:
print('True')
send_messages(ALL_DEPT_3RD_RX,chat_id)
return True
elif msg == ALL_4:
print('True')
send_messages(ALL_DEPT_4TH_RX,chat_id)
return True
elif msg == ALL_YR:
print('True')
send_messages(ALL_DEPT_RX,chat_id)
return True
elif re.search(ALL_YR_DEPT + '\[(EXTC|MECH|IT|COMP)\]$',msg):
send_messages('[1-4]' + re.findall("\[(EXTC|MECH|IT|COMP)\]$", msg)[0].strip(), chat_id)
return True
elif re.search('[1-4].+year ' + '(EXTC|MECH|IT|COMP)$',msg):
print('True')
send_messages(re.findall("^[1-4]", msg)[0].strip() + re.findall("(EXTC|MECH|IT|COMP)$", msg)[0].strip(), chat_id)
return True
return False
#======================================== Send message ==========================================
def send_messages(rx,chat_id):
bot.sendMessage(chat_id, 'Sending messages please wait.....')
print('send_messages()')
print('regex: ' + rx)
filedata = file_read('Admin/' + str(chat_id) + '/chat.txt','r')
if filedata == '{}':
print('Nothing to send!')
bot.sendMessage(chat_id, 'Nothing to send!')
return
grpdata = file_read('groups.txt','r')
json_dictionary_grp = json.loads(grpdata)
for k,v in json_dictionary_grp.items():
if re.search(rx,k):
if v == 'NA':
print('Chat id not found skipped')
bot.sendMessage(chat_id, 'Group not made!')
else:
#send message
print(k + ' :' + v)
json_dictionary = json.loads(filedata)
for i,j in json_dictionary.items():
if re.search('^#.+',j):
if re.search('.+.png$',j):
print('photo in db')
bot.sendPhoto(v, photo=open('Admin/' + str(chat_id) + '/' + re.findall('[0-9]+',j)[0].strip() + '.png', 'rb'))
print('photo sent')
else:
print('file in db')
bot.sendDocument(v, document=open('Admin/' + str(chat_id) + '/' + re.findall('[A-z0-9].+',j)[0].strip()))
print('file sent')
else:
bot.sendMessage(v,j)
print('message sent')
bot.sendMessage(chat_id, 'Sent!')
if re.search('[1-4]\.\+',rx) or re.search('\.\+',rx):
main_menu(chat_id,1)
else:
sub_main_menu(chat_id,re.findall("[A-Za-z].+", rx)[0].strip(),1)
############################################# Root #######################################################
def add_chat_to_db(chat_id, msg, file_sent, content_type):
#check if files are available
#create file and folder if not available
filedata = ''
try:
filedata = file_read('Admin/' + str(chat_id) + '/chat.txt','r')
print('File available!')
except:
try:
os.mkdir('Admin/' + str(chat_id))
with open('Admin/' + str(chat_id) + '/chat.txt','w') as file:
file.write('{}')
file.close()
print('chat.txt file created successfully!')
filedata = '{}'
except:
print('Error creating file!')
file.close()
#------------------------------------------------------------------------
json_dictionary = json.loads(filedata)
if file_sent == False:
json_dictionary.update({msg['message_id']: msg['text']})
else:
if content_type == 'photo':
json_dictionary.update({msg['message_id']: '#' + str(msg['message_id']) + '.png'})
else:
json_dictionary.update({msg['message_id']: '#' + msg['document']['file_name']})
#--------------------- Add chat to DB ----------------------------------------
try:
with open('Admin/' + str(chat_id) + '/chat.txt','w') as file:
file.write(json.dumps(json_dictionary))
print('Chat added to DB')
file.close()
except:
print('Error updating the DB')
file.close()
############################
inline_key_send('delete sent msg', '#delete ' + str(msg['message_id']), chat_id)
############################
#---------------------Update Admin info txt file------------------------------
try:
with open('Admin/' + str(chat_id) + '/admininfo.txt','w') as file:
file.write(json.dumps(msg['from']))
file.close()
except:
print('Error updating the DB')
file.close()
def on_chat_message(msg):
print('\non_chat_message() msg received')
content_type, chat_type, chat_id = telepot.glance(msg)
if msg['chat']['type'] == 'group':
print('Message received from group. Chat ID: ' + str(chat_id) + '\nGroup name: ' + msg['chat']['title'])
bot.sendMessage(chat_id, 'Not Allowed! ' + str(chat_id) + '\nGroup name: ' + msg['chat']['title'])
return
if content_type == 'text':
#add auth here
#--------Check if done button should be added or not--------------
add_done_btn = 0
try:
if (file_read('Admin/' + str(chat_id) + '/chat.txt','r') == '{}') == False:
add_done_btn = 1
print('add done button')
else:
print('not adding done')
except:
print('not adding done button! chat.txt not found')
if msg['text'] == '/start':
main_menu(chat_id,add_done_btn)
print('show main menu')
#========================================
#Dept-wise sub menu
elif msg['text'] == EXTC_MENU:
sub_main_menu(chat_id,'EXTC',add_done_btn)
print('show sub main menu EXTC')
elif msg['text'] == COMP_MENU:
sub_main_menu(chat_id,'COMP',add_done_btn)
print('show sub main menu COMP')
elif msg['text'] == IT_MENU:
sub_main_menu(chat_id,'IT',add_done_btn)
print('show sub main menu IT')
elif msg['text'] == MECH_MENU:
sub_main_menu(chat_id,'MECH',add_done_btn)
print('show sub main menu MECH')
elif msg['text'] == BACK_TO_MAINMENU:
main_menu(chat_id,add_done_btn)
print('show main menu')
#========================================
elif re.search("^Done (EXTC|IT|COMP|MECH)", msg['text']):
#delete chat db and documents
db_clear(chat_id,re.findall("(EXTC|IT|COMP|MECH)$", msg['text'])[0].strip())
elif msg['text'] == 'Done':
db_clear(chat_id, 'Done')
#============ Regex =====================
elif re.search('[1-4AS].+',msg['text']) and chk_if_send_cmd(msg['text'],chat_id):
pass
else:
add_chat_to_db(chat_id, msg, False, content_type)
# media file sent!
else:
print('File sent!')
add_chat_to_db(chat_id, msg, True, content_type)
#------------- Download the file ---------------------
if content_type == 'photo':
bot.download_file(msg['photo'][-1]['file_id'], 'Admin/' + str(chat_id) + '/' + str(msg['message_id']) + '.png')
else:
bot.download_file(msg['document']['file_id'], 'Admin/' + str(chat_id) + '/' + msg['document']['file_name'])
#-----------------------------------------------------
##############################################################
bot = telepot.Bot(TOKEN)
print('Listening ...')
bot.message_loop({'chat': on_chat_message,
'callback_query': on_callback_query}, run_forever=True)
|