Dataset schema (⌀ marks a column that may be null):

| column | dtype / range |
|---|---|
| hexsha | stringlengths 40–40 |
| size | int64 1–1.03M |
| ext | stringclasses 10 values |
| lang | stringclasses 1 value |
| max_stars_repo_path | stringlengths 3–239 |
| max_stars_repo_name | stringlengths 5–130 |
| max_stars_repo_head_hexsha | stringlengths 40–78 |
| max_stars_repo_licenses | sequencelengths 1–10 |
| max_stars_count | int64 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths 24–24 ⌀ |
| max_issues_repo_path | stringlengths 3–239 |
| max_issues_repo_name | stringlengths 5–130 |
| max_issues_repo_head_hexsha | stringlengths 40–78 |
| max_issues_repo_licenses | sequencelengths 1–10 |
| max_issues_count | int64 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths 24–24 ⌀ |
| max_forks_repo_path | stringlengths 3–239 |
| max_forks_repo_name | stringlengths 5–130 |
| max_forks_repo_head_hexsha | stringlengths 40–78 |
| max_forks_repo_licenses | sequencelengths 1–10 |
| max_forks_count | int64 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths 24–24 ⌀ |
| content | stringlengths 1–1.03M |
| avg_line_length | float64 1–958k |
| max_line_length | int64 1–1.03M |
| alphanum_fraction | float64 0–1 |
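Each row below follows this schema, with the raw Python file in the `content` column. For reference, here is a minimal sketch of how rows with these columns could be streamed with the Hugging Face `datasets` library, assuming the rows are published as such a dataset; the id `"org/dataset-name"` is a placeholder, not the real identifier.

```python
from datasets import load_dataset

# "org/dataset-name" is a placeholder id; the actual dataset identifier is not given in this dump.
rows = load_dataset("org/dataset-name", split="train", streaming=True)

for row in rows:
    # Column names match the schema table above.
    stars = row["max_stars_count"] or 0              # nullable column (marked ⌀ in the schema)
    if row["ext"] == "py" and stars >= 10:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
        source_text = row["content"]                 # the raw source file as one string
        break
```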
hexsha: 7946a1b746e880a69e881eee572c87e4be02b590 | size: 2,404 | ext: py | lang: Python
repo: blitzagency/django-chatterbox | path: chatterbox/tests/test_utils_facebook.py | head_hexsha: 7bf17444f8308aa12b6718bd62ee1344021c21aa | licenses: ["MIT"]
max_stars_count: 8 (2015-03-10T20:03:09.000Z to 2018-06-14T23:03:58.000Z) | max_issues_count: 3 (2015-07-14T22:44:47.000Z to 2020-06-05T23:43:05.000Z) | max_forks_count: null
import json
from django.test import TestCase
from .utils import load_json
from chatterbox.utils.facebook import activity_dict_from_dict
class FacebookUtils(TestCase):
def test_facebook_link_activity_dict_from_dict(self):
data = load_json("facebook-in-basic-link")
final = load_json("facebook-out-basic-link")
output = activity_dict_from_dict(data)
value1 = json.dumps(output, sort_keys=True)
value2 = json.dumps(final, sort_keys=True)
self.assertEqual(value1, value2)
def test_facebook_photo_activity_dict_from_dict(self):
data = load_json("facebook-in-basic-photo")
final = load_json("facebook-out-basic-photo")
output = activity_dict_from_dict(data)
value1 = json.dumps(output, sort_keys=True)
value2 = json.dumps(final, sort_keys=True)
self.assertEqual(value1, value2)
def test_facebook_basic_status_message_activity_dict_from_dict(self):
data = load_json("facebook-in-basic-status+message")
final = load_json("facebook-out-basic-status+message")
output = activity_dict_from_dict(data)
value1 = json.dumps(output, sort_keys=True)
value2 = json.dumps(final, sort_keys=True)
self.assertEqual(value1, value2)
def test_facebook_basic_status_message_comments_activity_dict_from_dict(self):
data = load_json("facebook-in-basic-status+message+comments")
final = load_json("facebook-out-basic-status+message+comments")
output = activity_dict_from_dict(data)
value1 = json.dumps(output, sort_keys=True)
value2 = json.dumps(final, sort_keys=True)
self.assertEqual(value1, value2)
def test_facebook_status_activity_dict_from_dict(self):
data = load_json("facebook-in-basic-status")
final = load_json("facebook-out-basic-status")
output = activity_dict_from_dict(data)
value1 = json.dumps(output, sort_keys=True)
value2 = json.dumps(final, sort_keys=True)
self.assertEqual(value1, value2)
def test_facebook_video_activity_dict_from_dict(self):
data = load_json("facebook-in-basic-video")
final = load_json("facebook-out-basic-video")
output = activity_dict_from_dict(data)
value1 = json.dumps(output, sort_keys=True)
value2 = json.dumps(final, sort_keys=True)
self.assertEqual(value1, value2)
avg_line_length: 42.928571 | max_line_length: 82 | alphanum_fraction: 0.707571
hexsha: 7946a24d65e7accfdaad6b33ab5b3d3818b6c9a1 | size: 2,681 | ext: py | lang: Python
repo: jhdulaney/oh-ubiome-source | path: main/tests/tests_management.py | head_hexsha: c01bd6c26fb6625ce455e3c2b24ff009bf2f0c64 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: 4 (2020-02-11T23:02:53.000Z to 2021-06-10T20:40:55.000Z) | max_forks_count: 2 (2018-07-13T15:51:18.000Z to 2018-12-27T20:54:52.000Z)
from django.test import TestCase, RequestFactory
import vcr
from django.conf import settings
from django.core.management import call_command
from open_humans.models import OpenHumansMember
import requests_mock
class ManagementTestCase(TestCase):
"""
test that files are parsed correctly
"""
def setUp(self):
"""
Set up the app for following tests
"""
settings.DEBUG = True
call_command('init_proj_config')
self.factory = RequestFactory()
data = {"access_token": 'myaccesstoken',
"refresh_token": 'bar',
"expires_in": 36000}
self.oh_member = OpenHumansMember.create(oh_id='1234',
data=data)
self.oh_member.save()
self.user = self.oh_member.user
self.user.save()
@vcr.use_cassette('main/tests/fixtures/process_file.yaml',
record_mode='none')
def test_management_process_file(self):
with requests_mock.Mocker() as m:
m.register_uri("GET",
"https://www.openhumans.org/api/direct-sharing/project/exchange-member/?access_token=myaccesstoken",
json={'data':
[{'id': 34567,
'basename': '23andme_valid.txt',
'created': '2018-03-30T00:09:36.563486Z',
'download_url': 'https://myawslink.com/member-files/direct-sharing-1337/1234/23andme_valid.txt?Signature=nope&Expires=1522390374&AWSAccessKeyId=nope',
'metadata': {'tags': ['bar'], 'description': 'foo'},
'source': 'direct-sharing-1337'}]})
m.register_uri("POST",
"https://www.openhumans.org/api/direct-sharing/project/files/delete/?access_token=myaccesstoken")
call_command('process_files')
@vcr.use_cassette('main/tests/fixtures/import_test_file.yaml',
record_mode='none')
def test_management_import_user(self):
self.assertEqual(len(OpenHumansMember.objects.all()),
1)
call_command('import_users',
infile='main/tests/fixtures/test_import.csv',
delimiter=',')
old_oh_member = OpenHumansMember.objects.get(oh_id='1234')
self.assertEqual(old_oh_member.refresh_token,
'bar')
new_oh_member = OpenHumansMember.objects.get(oh_id='2345')
self.assertEqual(new_oh_member.refresh_token,
'new_refresh')
avg_line_length: 43.95082 | max_line_length: 185 | alphanum_fraction: 0.56658
hexsha: 7946a360386bfa19eba8a4dab74830fbc01079d9 | size: 8,636 | ext: py | lang: Python
repo: roopeshvaddepally/python-ebay | path: ebay/resolution_case_management.py | head_hexsha: d49edd513459b2f0531cab0d7e840f53347ad1ad | licenses: ["Apache-2.0"]
max_stars_count: 55 (2015-01-20T10:08:01.000Z to 2022-01-11T18:01:45.000Z) | max_issues_count: 3 (2016-07-17T09:23:36.000Z to 2017-02-08T09:34:57.000Z) | max_forks_count: 31 (2015-02-17T08:44:44.000Z to 2021-12-23T00:54:04.000Z)
import urllib2
from lxml import etree
from utils import get_config_store
# case retrieval calls
def getUserCases(caseStatusFilter=None,
caseTypeFilter=None,
creationDateRangeFilterFrom=None,
creationDateRangeFilterTo=None,
itemFilter=None,
paginationInput=None,
sortOrder=None,
encoding="JSON"):
root = etree.Element("getUserCasesRequest",
xmlns="http://www.ebay.com/marketplace/search/v1/services")
#caseStatusFilter is a List
if caseStatusFilter:
caseStatusFilter_elem = etree.SubElement(root, "caseStatusFilter")
for status in caseStatusFilter:
caseStatus_elem = etree.SubElement(caseStatusFilter_elem, "caseStatus")
caseStatus_elem.text = status
#caseTypeFilter is a List
if caseTypeFilter:
caseTypeFilter_elem = etree.SubElement(root, "caseTypeFilter")
for case_type in caseTypeFilter:
            caseType_elem = etree.SubElement(caseTypeFilter_elem, "caseType")
caseType_elem.text = case_type
if creationDateRangeFilterFrom and creationDateRangeFilterTo:
creationDateRangeFilter_elem = etree.SubElement(root, "creationDateRangeFilter")
creationDateRangeFilterFrom_elem = etree.SubElement(creationDateRangeFilter_elem, "fromDate")
creationDateRangeFilterFrom_elem.text = creationDateRangeFilterFrom
creationDateRangeFilterTo_elem = etree.SubElement(creationDateRangeFilter_elem, "toDate")
creationDateRangeFilterTo_elem.text = creationDateRangeFilterTo
#itemFilter is a dict: {itemId:123, transactionId:72}
if itemFilter and len(itemFilter) > 0:
itemFilter_elem = etree.SubElement(root, "itemFilter")
for key in itemFilter.keys():
itemId_elem = etree.SubElement(itemFilter_elem, key)
itemId_elem.text = itemFilter[key]
# paginationInput is a dict: {entriesPerPage:5, pageNumber:10}
if paginationInput and len(paginationInput) > 0:
paginationInput_elem = etree.SubElement(root, "paginationInput")
for key in paginationInput.keys():
input_values_elem = etree.SubElement(paginationInput_elem, key)
input_values_elem.text = paginationInput[key]
if sortOrder:
sortOrder_elem = etree.SubElement(root, "sortOrder")
sortOrder_elem.text = sortOrder
request = etree.tostring(root, pretty_print=True)
return get_response(getUserCases.__name__, request, encoding)
def getEBPCaseDetail(caseId, caseType, encoding="JSON"):
root = etree.Element("getEBPCaseDetailRequest",
xmlns="http://www.ebay.com/marketplace/search/v1/services")
caseId_elem = etree.SubElement(root, "caseId")
id_elem = etree.SubElement(caseId_elem, "id")
id_elem.text = caseId
type_elem = etree.SubElement(caseId_elem, "type")
type_elem.text = caseType
request = etree.tostring(root, pretty_print=True)
return get_response(getEBPCaseDetail.__name__, request, encoding)
# Seller Option Calls
def provideTrackingInfo(caseId, caseType, carrierUsed, trackingNumber, comments=None, encoding="JSON"):
root = etree.Element("provideTrackingInfoRequest",
xmlns="http://www.ebay.com/marketplace/search/v1/services")
caseId_elem = etree.SubElement(root, "caseId")
id_elem = etree.SubElement(caseId_elem, "id")
id_elem.text = caseId
type_elem = etree.SubElement(caseId_elem, "type")
type_elem.text = caseType
carrierUsed_elem = etree.SubElement(root, "carrierUsed")
carrierUsed_elem.text = carrierUsed
trackingNumber_elem = etree.SubElement(root, "trackingNumber")
trackingNumber_elem.text = trackingNumber
if comments:
comments_elem = etree.SubElement(root, "comments")
comments_elem.text = comments
request = etree.tostring(root, pretty_print=True)
return get_response(provideTrackingInfo.__name__, request, encoding)
def issueFullRefund(caseId, caseType, comments=None, encoding="JSON"):
root = etree.Element("issueFullRefundRequest",
xmlns="http://www.ebay.com/marketplace/search/v1/services")
caseId_elem = etree.SubElement(root, "caseId")
id_elem = etree.SubElement(caseId_elem, "id")
id_elem.text = caseId
type_elem = etree.SubElement(caseId_elem, "type")
type_elem.text = caseType
if comments:
comments_elem = etree.SubElement(root, "comments")
comments_elem.text = comments
request = etree.tostring(root, pretty_print=True)
return get_response(issueFullRefund.__name__, request, encoding)
def offerOtherSolution(caseId, caseType, messageToBuyer, encoding="JSON"):
root = etree.Element("offerOtherSolutionRequest",
xmlns="http://www.ebay.com/marketplace/search/v1/services")
caseId_elem = etree.SubElement(root, "caseId")
id_elem = etree.SubElement(caseId_elem, "id")
id_elem.text = caseId
type_elem = etree.SubElement(caseId_elem, "type")
type_elem.text = caseType
messageToBuyer_elem = etree.SubElement(root, "messageToBuyer")
messageToBuyer_elem.text = messageToBuyer
request = etree.tostring(root, pretty_print=True)
return get_response(offerOtherSolution.__name__, request, encoding)
#NOT WORKING on SANDBOX, need to investigate
def escalateToCustomerSuppport(caseId, caseType, escalationReason,
comments=None, encoding="JSON"):
root = etree.Element("escalateToCustomerSuppportRequest",
xmlns="http://www.ebay.com/marketplace/search/v1/services")
caseId_elem = etree.SubElement(root, "caseId")
id_elem = etree.SubElement(caseId_elem, "id")
id_elem.text = caseId
type_elem = etree.SubElement(caseId_elem, "type")
type_elem.text = caseType
#escalationReason is a dict
escalationReason_elem = etree.SubElement(root, "escalationReason")
for key in escalationReason.keys():
key_elem = etree.SubElement(escalationReason_elem, key)
key_elem.text = escalationReason[key]
if comments:
comments_elem = etree.SubElement(root, "comments")
comments_elem.text = comments
request = etree.tostring(root, pretty_print=True)
return get_response(escalateToCustomerSuppport.__name__, request, encoding)
def appealToCustomerSupport(caseId, caseType, appealReason,
comments=None, encoding="JSON"):
root = etree.Element("appealToCustomerSupportRequest",
xmlns="http://www.ebay.com/marketplace/search/v1/services")
caseId_elem = etree.SubElement(root, "caseId")
id_elem = etree.SubElement(caseId_elem, "id")
id_elem.text = caseId
type_elem = etree.SubElement(caseId_elem, "type")
type_elem.text = caseType
appealReason_elem = etree.SubElement(root, "appealReason")
appealReason_elem.text = appealReason
if comments:
comments_elem = etree.SubElement(root, "comments")
comments_elem.text = comments
request = etree.tostring(root, pretty_print=True)
return get_response(appealToCustomerSupport.__name__, request, encoding)
# Metadata calls
def getActivityOptions(caseId, caseType, encoding="JSON"):
root = etree.Element("getActivityOptionsRequest",
xmlns="http://www.ebay.com/marketplace/search/v1/services")
caseId_elem = etree.SubElement(root, "caseId")
id_elem = etree.SubElement(caseId_elem, "id")
id_elem.text = caseId
type_elem = etree.SubElement(caseId_elem, "type")
type_elem.text = caseType
request = etree.tostring(root, pretty_print=True)
return get_response(getActivityOptions.__name__, request, encoding)
def getVersion(encoding="JSON"):
root = etree.Element("getVersionRequest",
xmlns="http://www.ebay.com/marketplace/search/v1/services")
request = etree.tostring(root, pretty_print=True)
return get_response(getVersion.__name__, request, encoding)
def get_response(operation_name, data, encoding, **headers):
config = get_config_store()
access_token = config.get("auth", "token")
endpoint = config.get("endpoints", "resolution_case_management")
http_headers = {
"X-EBAY-SOA-OPERATION-NAME": operation_name,
"X-EBAY-SOA-SECURITY-TOKEN": access_token,
"X-EBAY-SOA-RESPONSE-DATA-FORMAT": encoding}
http_headers.update(headers)
req = urllib2.Request(endpoint, data, http_headers)
res = urllib2.urlopen(req)
data = res.read()
return data
avg_line_length: 38.553571 | max_line_length: 103 | alphanum_fraction: 0.701945
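As a usage note for the eBay resolution-case module above, the sketch below shows one way its request helpers might be called. It is a hedged illustration only: the import path mirrors the file's location in the repo, and the status, pagination, and encoding values are placeholders rather than values confirmed against eBay's API.

```python
# Illustrative sketch, not part of the dataset row. Assumes the module above is importable
# and that utils.get_config_store() already returns a valid token and endpoint.
from ebay.resolution_case_management import getUserCases, getVersion

# Service version check, returned in the requested encoding.
print(getVersion(encoding="JSON"))

# Fetch cases; the dict keys follow the module's own comments, the values are made up.
cases = getUserCases(
    caseStatusFilter=["OPEN"],
    paginationInput={"entriesPerPage": "10", "pageNumber": "1"},
    encoding="JSON",
)
print(cases)
```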
hexsha: 7946a37de353db77b3a7c35cbde2bee8564ab190 | size: 55 | ext: py | lang: Python
repo: bab81/IntroPython | path: modules.py | head_hexsha: 87cd55f0afdbfe69f28c8ec95b782c2c4c76eb50 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
import useful_tools
print(useful_tools.roll_dice(10))
avg_line_length: 13.75 | max_line_length: 33 | alphanum_fraction: 0.836364
hexsha: 7946a3a4b723dfc35d2a69da1583ec08e2a9102c | size: 2,955 | ext: py | lang: Python
repo: G00dBye/YYMS | path: scripts/quest/q22750s.py | head_hexsha: 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | licenses: ["MIT"]
max_stars_count: 54 (2019-04-16T23:24:48.000Z to 2021-12-18T11:41:50.000Z) | max_issues_count: 3 (2019-05-19T15:19:41.000Z to 2020-04-27T16:29:16.000Z) | max_forks_count: 49 (2020-11-25T23:29:16.000Z to 2022-03-26T16:20:24.000Z)
KINESIS = 1531000
JAY = 1531000
YUNA = 1531002
NERO = 1531003
sm.lockForIntro()
sm.removeAdditionalEffect()
sm.blind(1, 255, 0, 0)
sm.spawnNpc(YUNA, 760, -272)
sm.showNpcSpecialActionByTemplateId(YUNA, "summon")
sm.spawnNpc(NERO, 600, -272)
sm.showNpcSpecialActionByTemplateId(NERO, "summon")
sm.teleportInField(647, -272)
sm.forcedFlip(False)
sm.zoomCamera(0, 2000, 0, 700, -200)
sm.blind(1, 255, 0, 0)
sm.sendDelay(1200)
sm.blind(0, 0, 0, 1000)
sm.sendDelay(1400)
sm.setIntroBoxChat(KINESIS)
sm.sendNext("Did you cry, Yuna?")
sm.showNpcSpecialActionByTemplateId(YUNA, "angry", -1)
sm.setIntroBoxChat(YUNA)
sm.sendSay("#face0#No, you dummy! I didn't cry.")
sm.stopNpcSpecialActionByTemplateId(YUNA)
sm.setIntroBoxChat(KINESIS)
sm.sendSay("#face2#You can hug me if you like. We can make it a touching reunion.")
sm.setIntroBoxChat(YUNA)
sm.sendSay("#face0#You jerk! You have no idea...")
sm.sendSay("#face4#Ahh, nevermind. So... What's with the clothes? You look like a wizard from some video game.")
sm.setIntroBoxChat(KINESIS)
sm.sendSay("#face3#It's a long story. What do you think? I can pull it off, right?")
sm.sendDelay(500)
sm.zoomCamera(500, 2000, 500, 800, -200)
sm.sendDelay(sm.getAnswerVal())
sm.setIntroBoxChat(JAY)
sm.sendSay("I knew you'd be fine. ")
sm.setIntroBoxChat(KINESIS)
sm.sendSay("Wow, Jay. Can't even pretend to be happy to see me?")
sm.setIntroBoxChat(JAY)
sm.sendSay("#face2#I mean, I knew something was up because your signal kept coming from the sinkhole, but your vitals were all over the place. Strange, but I'm used to that with you. So, what happened? And who's the kid?")
sm.setIntroBoxChat(YUNA)
sm.sendSay("#face1#Oh yeah, who IS that kid?")
sm.setIntroBoxChat(KINESIS)
sm.sendSay("Jay, you're a sharp cookie. I bet you already figured out what happened to me. As for the kid, this is Nero the cat.")
sm.setIntroBoxChat(NERO)
sm.sendSay("#face0#Ahem! I am #bNella Medionel Roonia#k. In our ancient tongue, it means half-child of the sacred moonlight. See, I'm from a real big-deal magician family...")
sm.setIntroBoxChat(KINESIS)
sm.sendSay("You can call her Nero.")
sm.setIntroBoxChat(NERO)
sm.sendSay("#face2#Mrrrow!")
sm.setIntroBoxChat(JAY)
sm.sendSay(" #face1#Oo-kaaay...")
sm.setIntroBoxChat(YUNA)
sm.sendSay(" #face4#Wait, so... that kid is the black cat? The black cat is this kid?")
sm.setIntroBoxChat(NERO)
sm.sendSay(" #face2#Correct! ")
sm.setIntroBoxChat(YUNA)
sm.sendSay(" #face4#")
sm.setIntroBoxChat(JAY)
sm.sendSay(" #face6#")
sm.setIntroBoxChat(KINESIS)
sm.sendSay(" #face2#Okay, so... There's a lot to go over, so let's continue this over some food. Preferably something that's not Cold Eye Tail Spicy Soup.")
sm.zoomCamera(0, 1000, 2147483647, 2147483647, 2147483647)
sm.moveCamera(True, 0, 0, 0)
sm.sendDelay(300)
sm.removeOverlapScreen(1000)
sm.unlockForIntro()
sm.startQuest(parentID)
sm.completeQuest(parentID)
sm.giveExp(3000)
sm.removeNpc(YUNA)
sm.removeNpc(NERO)
avg_line_length: 29.257426 | max_line_length: 222 | alphanum_fraction: 0.743824
hexsha: 7946a41c791be3875e67642aa31542e326df9ae5 | size: 3,568 | ext: py | lang: Python
repo: MalekNaimi/VGGish-Model | path: non-overlap/evaluate_acc.py | head_hexsha: 61369097fe669c642f15a94f964e9130b0218756 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
from tqdm import tqdm
import numpy as np
from collections import Counter
import random
with open('/content/VGGish-Model/non-overlap/test_clip_length.txt','r') as f:
line = f.readline().split()
test_clip_length = [int(x) for x in line]
with open('/content/VGGish-Model/non-overlap/test_preds.txt','r') as f:
line = f.readline().split()
test_preds = [int(x) for x in line]
with open('/content/VGGish-Model/non-overlap/test_labels.txt','r') as f:
line = f.readline().split()
test_labels = [int(x) for x in line]
with open('/content/VGGish-Model/non-overlap/test_logits.txt','r') as f:
line = f.readlines()
test_logits = np.asarray([[float(y) for y in x.strip().split()] for x in line])
def confusion_matrix(label,pred,num_classes=4):
hist = np.zeros((num_classes,num_classes))
for i,j in zip(label,pred):
hist[i,j] += 1
axis_sum = np.sum(hist,axis=1)
return hist
y_clip_label = []
abs_cnt,standard_cnt,logits_cnt = [],[],[]
for i in tqdm(range(len(test_clip_length)-1)):
start,end = test_clip_length[i],test_clip_length[i+1]
y_labels = test_labels[start:end]
y_preds = test_preds[start:end]
y_logits = test_logits[start:end]
clip_label = Counter(y_labels).most_common(1)[0][0]
y_clip_label.append(clip_label)
y_pred_max_count = Counter(y_preds).most_common(1)[0][1]
y_pred_dict = dict(Counter(y_preds))
# List of the classes that are predicted maximum times.
# For eg. if predictions = [1,1,2,3,4,3,2,1,2]
# max_freq = [1,2] as both of them occur maximum(3)
# times in predictions.
max_freq = []
for key in list(y_pred_dict.keys()):
if y_pred_dict[key] == y_pred_max_count:
max_freq.append(key)
max_freq = list(set(list(max_freq)))
# print(clip_label,max_freq[0],Counter(y_labels),Counter(y_preds))
# Absolute Clip Accuracy (Strictest Accuracy): If the model predicts only one
# class for each and every frame of a particular clip then
# only we assume that the model has predicted the class of the clip.
# Note that this can't predict for all clips. For e.g. if 8/10 frames of a clip are predicted 1 and rest are predicted zero, we can't predict any class.
abs_cnt.append(1) if clip_label == max_freq[0] and len(Counter(y_preds)) == 1 else abs_cnt.append(0)
# Standard Clip Accuracy: If len(max_freq) == 1 i.e. there is exactly one
# class with maximum frequency and that class is the clip label, we assume
# the model has correctly predicted the class of the clip. Statistically
# speaking if the clip label is the mode of the predictions.For example,there is a
# 5 second clip of class 1 and the model predicts = [1,1,2,3,0] then we say
# that the model has predicted correct. If it predicts [1,1,2,2,3], we say it is incorrect.
standard_cnt.append(max_freq[0])
# Average Logits Accuracy
logits_cnt.append(np.argmax(np.mean(y_logits,axis=0)))
# Frame Level Accuracy.
frame_cnt = [1 for x,y in zip(test_labels,test_preds) if x == y]
print('frame_accuracy:',np.sum(frame_cnt)/float(len(test_labels)))
print('absolute_clip_accuracy:',np.sum(abs_cnt)/float(len(test_clip_length)-1))
print('standard_clip_accuracy:',np.sum(np.asarray(standard_cnt) == np.asarray(y_clip_label))/float(len(test_clip_length)-1))
print('average_logits_clip_accuracy:',np.sum(np.asarray(logits_cnt) == np.asarray(y_clip_label))/float(len(test_clip_length)-1))
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
print(confusion_matrix(y_clip_label,standard_cnt))
avg_line_length: 43.512195 | max_line_length: 156 | alphanum_fraction: 0.706278
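To make the clip-level metrics in the script above concrete, the toy example below (made-up values, not taken from the test set) shows how the "standard clip accuracy" treats the mode of the frame predictions as the clip prediction, exactly as the script's in-line comments describe.

```python
from collections import Counter

# Toy clip mirroring the script's comment: five frames whose true class is 1.
frame_labels = [1, 1, 1, 1, 1]
frame_preds = [1, 1, 2, 3, 0]    # per-frame model predictions

clip_label = Counter(frame_labels).most_common(1)[0][0]  # -> 1
clip_pred = Counter(frame_preds).most_common(1)[0][0]    # -> 1, the mode of the predictions

print(clip_pred == clip_label)   # True, so this clip counts as correct under standard clip accuracy
```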
hexsha: 7946a4dd9f09f5ccf4e23f689103336a1fe5247f | size: 4,906 | ext: py | lang: Python
repo: tusharganguli/brain-tokyo-workshop | path: WANNRelease/WANN/wann_src/dataGatherer.py | head_hexsha: 4ccee864229598f5af29af56197fb5fc7240c1c9 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
import os
import numpy as np
import copy
from .ind import exportNet
import logging
from wann_train import rank
class DataGatherer():
''' Data recorder for WANN algorithm'''
def __init__(self, filename, hyp):
"""
Args:
filename - (string) - path+prefix of file output destination
hyp - (dict) - algorithm hyperparameters
"""
logging.info(str(rank) + "__init__")
self.filename = filename # File name path + prefix
self.p = hyp
# Initialize empty fields
self.elite = []
self.best = []
self.bestFitVec = []
self.spec_fit = []
self.field = ['x_scale','fit_med','fit_max','fit_top','fit_peak',\
'node_med','conn_med',\
'elite','best']
self.objVals = np.array([])
for f in self.field[:-2]:
exec('self.' + f + ' = np.array([])')
#e.g. self.fit_max = np.array([])
self.newBest = False
def gatherData(self, pop, species):
# Readability
fitness = [ind.fitness for ind in pop]
peakfit = [ind.fitMax for ind in pop]
nodes = np.asarray([np.shape(ind.node)[1] for ind in pop])
conns = np.asarray([ind.nConn for ind in pop])
# --- Evaluation Scale ---------------------------------------------------
    if len(self.x_scale) == 0:
self.x_scale = np.append(self.x_scale, len(pop))
else:
self.x_scale = np.append(self.x_scale, self.x_scale[-1]+len(pop))
# ------------------------------------------------------------------------
# --- Best Individual ----------------------------------------------------
self.elite.append(pop[np.argmax(fitness)])
    if len(self.best) == 0:
self.best = copy.deepcopy(self.elite)
elif (self.elite[-1].fitness > self.best[-1].fitness):
self.best = np.append(self.best,copy.deepcopy(self.elite[-1]))
self.newBest = True
else:
self.best = np.append(self.best,copy.deepcopy(self.best[-1]))
self.newBest = False
# ------------------------------------------------------------------------
# --- Generation fit/complexity stats ------------------------------------
self.node_med = np.append(self.node_med,np.median(nodes))
self.conn_med = np.append(self.conn_med,np.median(conns))
self.fit_med = np.append(self.fit_med, np.median(fitness))
self.fit_max = np.append(self.fit_max, self.elite[-1].fitness)
self.fit_top = np.append(self.fit_top, self.best[-1].fitness)
self.fit_peak = np.append(self.fit_peak, self.best[-1].fitMax)
# ------------------------------------------------------------------------
# --- MOO Fronts ---------------------------------------------------------
if len(self.objVals) == 0:
self.objVals = np.c_[fitness,peakfit,conns]
else:
self.objVals = np.c_[self.objVals, np.c_[fitness,peakfit,conns]]
# ------------------------------------------------------------------------
def display(self):
return "|---| Elite Fit: " + '{:.2f}'.format(self.fit_max[-1]) \
+ " \t|---| Best Fit: " + '{:.2f}'.format(self.fit_top[-1]) \
+ " \t|---| Peak Fit: " + '{:.2f}'.format(self.fit_peak[-1])
def save(self, gen=(-1), saveFullPop=False):
''' Save data to disk '''
filename = self.filename
pref = 'log/' + filename
# --- Generation fit/complexity stats ------------------------------------
gStatLabel = ['x_scale',\
'fit_med','fit_max','fit_top','fit_peak',\
'node_med','conn_med']
genStats = np.empty((len(self.x_scale),0))
for i in range(len(gStatLabel)):
#e.g. self. fit_max [:,None]
evalString = 'self.' + gStatLabel[i] + '[:,None]'
genStats = np.hstack((genStats, eval(evalString)))
lsave(pref + '_stats.out', genStats)
# ------------------------------------------------------------------------
# --- Best Individual ----------------------------------------------------
wMat = self.best[gen].wMat
aVec = self.best[gen].aVec
exportNet(pref + '_best.out',wMat,aVec)
if gen > 1:
folder = 'log/' + filename + '_best/'
if not os.path.exists(folder):
os.makedirs(folder)
exportNet(folder + str(gen).zfill(4) +'.out',wMat,aVec)
# ------------------------------------------------------------------------
# --- MOO Fronts ---------------------------------------------------------
lsave(pref + '_objVals.out',self.objVals)
# ------------------------------------------------------------------------
def savePop(self,pop,filename):
folder = 'log/' + filename + '_pop/'
if not os.path.exists(folder):
os.makedirs(folder)
for i in range(len(pop)):
exportNet(folder+'ind_'+str(i)+'.out', pop[i].wMat, pop[i].aVec)
def lsave(filename, data):
np.savetxt(filename, data, delimiter=',',fmt='%1.2e')
avg_line_length: 35.810219 | max_line_length: 79 | alphanum_fraction: 0.481247
hexsha: 7946a63af83551baa3012f3eb8b4c6d01d05492d | size: 47,427 | ext: py | lang: Python
repo: lauranovich/python-driver | path: cassandra/cqltypes.py | head_hexsha: 89e4b246f259db38322d2f9a7c1111a4ff380c92 | licenses: ["Apache-2.0"]
max_stars_count: 1,163 (2015-01-01T03:02:05.000Z to 2022-03-22T13:04:00.000Z) | max_issues_count: 556 (2015-01-05T16:39:29.000Z to 2022-03-26T20:51:36.000Z) | max_forks_count: 449 (2015-01-05T10:28:59.000Z to 2022-03-14T23:15:32.000Z)
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Representation of Cassandra data types. These classes should make it simple for
the library (and caller software) to deal with Cassandra-style Java class type
names and CQL type specifiers, and convert between them cleanly. Parameterized
types are fully supported in both flavors. Once you have the right Type object
for the type you want, you can use it to serialize, deserialize, or retrieve
the corresponding CQL or Cassandra type strings.
"""
# NOTE:
# If/when the need arises for interpret types from CQL string literals in
# different ways (for https://issues.apache.org/jira/browse/CASSANDRA-3799,
# for example), these classes would be a good place to tack on
# .from_cql_literal() and .as_cql_literal() classmethods (or whatever).
from __future__ import absolute_import # to enable import io from stdlib
import ast
from binascii import unhexlify
import calendar
from collections import namedtuple
from decimal import Decimal
import io
from itertools import chain
import logging
import re
import socket
import time
import six
from six.moves import range
import struct
import sys
from uuid import UUID
from cassandra.marshal import (int8_pack, int8_unpack, int16_pack, int16_unpack,
uint16_pack, uint16_unpack, uint32_pack, uint32_unpack,
int32_pack, int32_unpack, int64_pack, int64_unpack,
float_pack, float_unpack, double_pack, double_unpack,
varint_pack, varint_unpack, point_be, point_le,
vints_pack, vints_unpack)
from cassandra import util
_little_endian_flag = 1 # we always serialize LE
if six.PY3:
import ipaddress
_ord = ord if six.PY2 else lambda x: x
apache_cassandra_type_prefix = 'org.apache.cassandra.db.marshal.'
cassandra_empty_type = 'org.apache.cassandra.db.marshal.EmptyType'
cql_empty_type = 'empty'
log = logging.getLogger(__name__)
if six.PY3:
_number_types = frozenset((int, float))
long = int
def _name_from_hex_string(encoded_name):
bin_str = unhexlify(encoded_name)
return bin_str.decode('ascii')
else:
_number_types = frozenset((int, long, float))
_name_from_hex_string = unhexlify
def trim_if_startswith(s, prefix):
if s.startswith(prefix):
return s[len(prefix):]
return s
_casstypes = {}
_cqltypes = {}
cql_type_scanner = re.Scanner((
('frozen', None),
(r'[a-zA-Z0-9_]+', lambda s, t: t),
(r'[\s,<>]', None),
))
def cql_types_from_string(cql_type):
return cql_type_scanner.scan(cql_type)[0]
class CassandraTypeType(type):
"""
The CassandraType objects in this module will normally be used directly,
rather than through instances of those types. They can be instantiated,
of course, but the type information is what this driver mainly needs.
This metaclass registers CassandraType classes in the global
by-cassandra-typename and by-cql-typename registries, unless their class
name starts with an underscore.
"""
def __new__(metacls, name, bases, dct):
dct.setdefault('cassname', name)
cls = type.__new__(metacls, name, bases, dct)
if not name.startswith('_'):
_casstypes[name] = cls
if not cls.typename.startswith(apache_cassandra_type_prefix):
_cqltypes[cls.typename] = cls
return cls
casstype_scanner = re.Scanner((
(r'[()]', lambda s, t: t),
(r'[a-zA-Z0-9_.:=>]+', lambda s, t: t),
(r'[\s,]', None),
))
def cqltype_to_python(cql_string):
"""
Given a cql type string, creates a list that can be manipulated in python
Example:
int -> ['int']
frozen<tuple<text, int>> -> ['frozen', ['tuple', ['text', 'int']]]
"""
scanner = re.Scanner((
(r'[a-zA-Z0-9_]+', lambda s, t: "'{}'".format(t)),
(r'<', lambda s, t: ', ['),
(r'>', lambda s, t: ']'),
(r'[, ]', lambda s, t: t),
(r'".*?"', lambda s, t: "'{}'".format(t)),
))
scanned_tokens = scanner.scan(cql_string)[0]
hierarchy = ast.literal_eval(''.join(scanned_tokens))
return [hierarchy] if isinstance(hierarchy, str) else list(hierarchy)
def python_to_cqltype(types):
"""
Opposite of the `cql_to_python` function. Given a python list, creates a cql type string from the representation
Example:
['int'] -> int
['frozen', ['tuple', ['text', 'int']]] -> frozen<tuple<text, int>>
"""
scanner = re.Scanner((
(r"'[a-zA-Z0-9_]+'", lambda s, t: t[1:-1]),
(r'^\[', lambda s, t: None),
(r'\]$', lambda s, t: None),
(r',\s*\[', lambda s, t: '<'),
(r'\]', lambda s, t: '>'),
(r'[, ]', lambda s, t: t),
(r'\'".*?"\'', lambda s, t: t[1:-1]),
))
scanned_tokens = scanner.scan(repr(types))[0]
cql = ''.join(scanned_tokens).replace('\\\\', '\\')
return cql
def _strip_frozen_from_python(types):
"""
Given a python list representing a cql type, removes 'frozen'
Example:
['frozen', ['tuple', ['text', 'int']]] -> ['tuple', ['text', 'int']]
"""
while 'frozen' in types:
index = types.index('frozen')
types = types[:index] + types[index + 1] + types[index + 2:]
new_types = [_strip_frozen_from_python(item) if isinstance(item, list) else item for item in types]
return new_types
def strip_frozen(cql):
"""
Given a cql type string, and removes frozen
Example:
frozen<tuple<int>> -> tuple<int>
"""
types = cqltype_to_python(cql)
types_without_frozen = _strip_frozen_from_python(types)
cql = python_to_cqltype(types_without_frozen)
return cql
def lookup_casstype_simple(casstype):
"""
Given a Cassandra type name (either fully distinguished or not), hand
back the CassandraType class responsible for it. If a name is not
recognized, a custom _UnrecognizedType subclass will be created for it.
This function does not handle complex types (so no type parameters--
nothing with parentheses). Use lookup_casstype() instead if you might need
that.
"""
shortname = trim_if_startswith(casstype, apache_cassandra_type_prefix)
try:
typeclass = _casstypes[shortname]
except KeyError:
typeclass = mkUnrecognizedType(casstype)
return typeclass
def parse_casstype_args(typestring):
tokens, remainder = casstype_scanner.scan(typestring)
if remainder:
raise ValueError("weird characters %r at end" % remainder)
# use a stack of (types, names) lists
args = [([], [])]
for tok in tokens:
if tok == '(':
args.append(([], []))
elif tok == ')':
types, names = args.pop()
prev_types, prev_names = args[-1]
prev_types[-1] = prev_types[-1].apply_parameters(types, names)
else:
types, names = args[-1]
parts = re.split(':|=>', tok)
tok = parts.pop()
if parts:
names.append(parts[0])
else:
names.append(None)
ctype = lookup_casstype_simple(tok)
types.append(ctype)
# return the first (outer) type, which will have all parameters applied
return args[0][0][0]
def lookup_casstype(casstype):
"""
Given a Cassandra type as a string (possibly including parameters), hand
back the CassandraType class responsible for it. If a name is not
recognized, a custom _UnrecognizedType subclass will be created for it.
Example:
>>> lookup_casstype('org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.Int32Type)')
<class 'cassandra.cqltypes.MapType(UTF8Type, Int32Type)'>
"""
if isinstance(casstype, (CassandraType, CassandraTypeType)):
return casstype
try:
return parse_casstype_args(casstype)
except (ValueError, AssertionError, IndexError) as e:
raise ValueError("Don't know how to parse type string %r: %s" % (casstype, e))
def is_reversed_casstype(data_type):
return issubclass(data_type, ReversedType)
class EmptyValue(object):
""" See _CassandraType.support_empty_values """
def __str__(self):
return "EMPTY"
__repr__ = __str__
EMPTY = EmptyValue()
@six.add_metaclass(CassandraTypeType)
class _CassandraType(object):
subtypes = ()
num_subtypes = 0
empty_binary_ok = False
support_empty_values = False
"""
Back in the Thrift days, empty strings were used for "null" values of
all types, including non-string types. For most users, an empty
string value in an int column is the same as being null/not present,
so the driver normally returns None in this case. (For string-like
types, it *will* return an empty string by default instead of None.)
To avoid this behavior, set this to :const:`True`. Instead of returning
None for empty string values, the EMPTY singleton (an instance
of EmptyValue) will be returned.
"""
def __repr__(self):
return '<%s( %r )>' % (self.cql_parameterized_type(), self.val)
@classmethod
def from_binary(cls, byts, protocol_version):
"""
Deserialize a bytestring into a value. See the deserialize() method
for more information. This method differs in that if None or the empty
string is passed in, None may be returned.
"""
if byts is None:
return None
elif len(byts) == 0 and not cls.empty_binary_ok:
return EMPTY if cls.support_empty_values else None
return cls.deserialize(byts, protocol_version)
@classmethod
def to_binary(cls, val, protocol_version):
"""
Serialize a value into a bytestring. See the serialize() method for
more information. This method differs in that if None is passed in,
the result is the empty string.
"""
return b'' if val is None else cls.serialize(val, protocol_version)
@staticmethod
def deserialize(byts, protocol_version):
"""
Given a bytestring, deserialize into a value according to the protocol
for this type. Note that this does not create a new instance of this
class; it merely gives back a value that would be appropriate to go
inside an instance of this class.
"""
return byts
@staticmethod
def serialize(val, protocol_version):
"""
Given a value appropriate for this class, serialize it according to the
protocol for this type and return the corresponding bytestring.
"""
return val
@classmethod
def cass_parameterized_type_with(cls, subtypes, full=False):
"""
Return the name of this type as it would be expressed by Cassandra,
optionally fully qualified. If subtypes is not None, it is expected
to be a list of other CassandraType subclasses, and the output
string includes the Cassandra names for those subclasses as well,
as parameters to this one.
Example:
>>> LongType.cass_parameterized_type_with(())
'LongType'
>>> LongType.cass_parameterized_type_with((), full=True)
'org.apache.cassandra.db.marshal.LongType'
>>> SetType.cass_parameterized_type_with([DecimalType], full=True)
'org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.DecimalType)'
"""
cname = cls.cassname
if full and '.' not in cname:
cname = apache_cassandra_type_prefix + cname
if not subtypes:
return cname
sublist = ', '.join(styp.cass_parameterized_type(full=full) for styp in subtypes)
return '%s(%s)' % (cname, sublist)
@classmethod
def apply_parameters(cls, subtypes, names=None):
"""
Given a set of other CassandraTypes, create a new subtype of this type
using them as parameters. This is how composite types are constructed.
>>> MapType.apply_parameters([DateType, BooleanType])
<class 'cassandra.cqltypes.MapType(DateType, BooleanType)'>
`subtypes` will be a sequence of CassandraTypes. If provided, `names`
will be an equally long sequence of column names or Nones.
"""
if cls.num_subtypes != 'UNKNOWN' and len(subtypes) != cls.num_subtypes:
raise ValueError("%s types require %d subtypes (%d given)"
% (cls.typename, cls.num_subtypes, len(subtypes)))
newname = cls.cass_parameterized_type_with(subtypes)
if six.PY2 and isinstance(newname, unicode):
newname = newname.encode('utf-8')
return type(newname, (cls,), {'subtypes': subtypes, 'cassname': cls.cassname, 'fieldnames': names})
@classmethod
def cql_parameterized_type(cls):
"""
Return a CQL type specifier for this type. If this type has parameters,
they are included in standard CQL <> notation.
"""
if not cls.subtypes:
return cls.typename
return '%s<%s>' % (cls.typename, ', '.join(styp.cql_parameterized_type() for styp in cls.subtypes))
@classmethod
def cass_parameterized_type(cls, full=False):
"""
Return a Cassandra type specifier for this type. If this type has
parameters, they are included in the standard () notation.
"""
return cls.cass_parameterized_type_with(cls.subtypes, full=full)
# it's initially named with a _ to avoid registering it as a real type, but
# client programs may want to use the name still for isinstance(), etc
CassandraType = _CassandraType
class _UnrecognizedType(_CassandraType):
num_subtypes = 'UNKNOWN'
if six.PY3:
def mkUnrecognizedType(casstypename):
return CassandraTypeType(casstypename,
(_UnrecognizedType,),
{'typename': "'%s'" % casstypename})
else:
def mkUnrecognizedType(casstypename): # noqa
return CassandraTypeType(casstypename.encode('utf8'),
(_UnrecognizedType,),
{'typename': "'%s'" % casstypename})
class BytesType(_CassandraType):
typename = 'blob'
empty_binary_ok = True
@staticmethod
def serialize(val, protocol_version):
return six.binary_type(val)
class DecimalType(_CassandraType):
typename = 'decimal'
@staticmethod
def deserialize(byts, protocol_version):
scale = int32_unpack(byts[:4])
unscaled = varint_unpack(byts[4:])
return Decimal('%de%d' % (unscaled, -scale))
@staticmethod
def serialize(dec, protocol_version):
try:
sign, digits, exponent = dec.as_tuple()
except AttributeError:
try:
sign, digits, exponent = Decimal(dec).as_tuple()
except Exception:
raise TypeError("Invalid type for Decimal value: %r", dec)
unscaled = int(''.join([str(digit) for digit in digits]))
if sign:
unscaled *= -1
scale = int32_pack(-exponent)
unscaled = varint_pack(unscaled)
return scale + unscaled
class UUIDType(_CassandraType):
typename = 'uuid'
@staticmethod
def deserialize(byts, protocol_version):
return UUID(bytes=byts)
@staticmethod
def serialize(uuid, protocol_version):
try:
return uuid.bytes
except AttributeError:
raise TypeError("Got a non-UUID object for a UUID value")
class BooleanType(_CassandraType):
typename = 'boolean'
@staticmethod
def deserialize(byts, protocol_version):
return bool(int8_unpack(byts))
@staticmethod
def serialize(truth, protocol_version):
return int8_pack(truth)
class ByteType(_CassandraType):
typename = 'tinyint'
@staticmethod
def deserialize(byts, protocol_version):
return int8_unpack(byts)
@staticmethod
def serialize(byts, protocol_version):
return int8_pack(byts)
if six.PY2:
class AsciiType(_CassandraType):
typename = 'ascii'
empty_binary_ok = True
else:
class AsciiType(_CassandraType):
typename = 'ascii'
empty_binary_ok = True
@staticmethod
def deserialize(byts, protocol_version):
return byts.decode('ascii')
@staticmethod
def serialize(var, protocol_version):
try:
return var.encode('ascii')
except UnicodeDecodeError:
return var
class FloatType(_CassandraType):
typename = 'float'
@staticmethod
def deserialize(byts, protocol_version):
return float_unpack(byts)
@staticmethod
def serialize(byts, protocol_version):
return float_pack(byts)
class DoubleType(_CassandraType):
typename = 'double'
@staticmethod
def deserialize(byts, protocol_version):
return double_unpack(byts)
@staticmethod
def serialize(byts, protocol_version):
return double_pack(byts)
class LongType(_CassandraType):
typename = 'bigint'
@staticmethod
def deserialize(byts, protocol_version):
return int64_unpack(byts)
@staticmethod
def serialize(byts, protocol_version):
return int64_pack(byts)
class Int32Type(_CassandraType):
typename = 'int'
@staticmethod
def deserialize(byts, protocol_version):
return int32_unpack(byts)
@staticmethod
def serialize(byts, protocol_version):
return int32_pack(byts)
class IntegerType(_CassandraType):
typename = 'varint'
@staticmethod
def deserialize(byts, protocol_version):
return varint_unpack(byts)
@staticmethod
def serialize(byts, protocol_version):
return varint_pack(byts)
class InetAddressType(_CassandraType):
typename = 'inet'
@staticmethod
def deserialize(byts, protocol_version):
if len(byts) == 16:
return util.inet_ntop(socket.AF_INET6, byts)
else:
# util.inet_pton could also handle, but this is faster
# since we've already determined the AF
return socket.inet_ntoa(byts)
@staticmethod
def serialize(addr, protocol_version):
try:
if ':' in addr:
return util.inet_pton(socket.AF_INET6, addr)
else:
# util.inet_pton could also handle, but this is faster
# since we've already determined the AF
return socket.inet_aton(addr)
except:
if six.PY3 and isinstance(addr, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
return addr.packed
raise ValueError("can't interpret %r as an inet address" % (addr,))
class CounterColumnType(LongType):
typename = 'counter'
cql_timestamp_formats = (
'%Y-%m-%d %H:%M',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%dT%H:%M',
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%d'
)
_have_warned_about_timestamps = False
class DateType(_CassandraType):
typename = 'timestamp'
@staticmethod
def interpret_datestring(val):
if val[-5] in ('+', '-'):
offset = (int(val[-4:-2]) * 3600 + int(val[-2:]) * 60) * int(val[-5] + '1')
val = val[:-5]
else:
offset = -time.timezone
for tformat in cql_timestamp_formats:
try:
tval = time.strptime(val, tformat)
except ValueError:
continue
# scale seconds to millis for the raw value
return (calendar.timegm(tval) + offset) * 1e3
else:
raise ValueError("can't interpret %r as a date" % (val,))
@staticmethod
def deserialize(byts, protocol_version):
timestamp = int64_unpack(byts) / 1000.0
return util.datetime_from_timestamp(timestamp)
@staticmethod
def serialize(v, protocol_version):
try:
# v is datetime
timestamp_seconds = calendar.timegm(v.utctimetuple())
timestamp = timestamp_seconds * 1e3 + getattr(v, 'microsecond', 0) / 1e3
except AttributeError:
try:
timestamp = calendar.timegm(v.timetuple()) * 1e3
except AttributeError:
# Ints and floats are valid timestamps too
if type(v) not in _number_types:
raise TypeError('DateType arguments must be a datetime, date, or timestamp')
timestamp = v
return int64_pack(long(timestamp))
class TimestampType(DateType):
pass
class TimeUUIDType(DateType):
typename = 'timeuuid'
def my_timestamp(self):
return util.unix_time_from_uuid1(self.val)
@staticmethod
def deserialize(byts, protocol_version):
return UUID(bytes=byts)
@staticmethod
def serialize(timeuuid, protocol_version):
try:
return timeuuid.bytes
except AttributeError:
raise TypeError("Got a non-UUID object for a UUID value")
class SimpleDateType(_CassandraType):
typename = 'date'
date_format = "%Y-%m-%d"
# Values of the 'date'` type are encoded as 32-bit unsigned integers
# representing a number of days with epoch (January 1st, 1970) at the center of the
# range (2^31).
EPOCH_OFFSET_DAYS = 2 ** 31
@staticmethod
def deserialize(byts, protocol_version):
days = uint32_unpack(byts) - SimpleDateType.EPOCH_OFFSET_DAYS
return util.Date(days)
@staticmethod
def serialize(val, protocol_version):
try:
days = val.days_from_epoch
except AttributeError:
if isinstance(val, six.integer_types):
# the DB wants offset int values, but util.Date init takes days from epoch
# here we assume int values are offset, as they would appear in CQL
# short circuit to avoid subtracting just to add offset
return uint32_pack(val)
days = util.Date(val).days_from_epoch
return uint32_pack(days + SimpleDateType.EPOCH_OFFSET_DAYS)
class ShortType(_CassandraType):
typename = 'smallint'
@staticmethod
def deserialize(byts, protocol_version):
return int16_unpack(byts)
@staticmethod
def serialize(byts, protocol_version):
return int16_pack(byts)
class TimeType(_CassandraType):
typename = 'time'
@staticmethod
def deserialize(byts, protocol_version):
return util.Time(int64_unpack(byts))
@staticmethod
def serialize(val, protocol_version):
try:
nano = val.nanosecond_time
except AttributeError:
nano = util.Time(val).nanosecond_time
return int64_pack(nano)
class DurationType(_CassandraType):
typename = 'duration'
@staticmethod
def deserialize(byts, protocol_version):
months, days, nanoseconds = vints_unpack(byts)
return util.Duration(months, days, nanoseconds)
@staticmethod
def serialize(duration, protocol_version):
try:
m, d, n = duration.months, duration.days, duration.nanoseconds
except AttributeError:
raise TypeError('DurationType arguments must be a Duration.')
return vints_pack([m, d, n])
class UTF8Type(_CassandraType):
typename = 'text'
empty_binary_ok = True
@staticmethod
def deserialize(byts, protocol_version):
return byts.decode('utf8')
@staticmethod
def serialize(ustr, protocol_version):
try:
return ustr.encode('utf-8')
except UnicodeDecodeError:
# already utf-8
return ustr
class VarcharType(UTF8Type):
typename = 'varchar'
class _ParameterizedType(_CassandraType):
num_subtypes = 'UNKNOWN'
@classmethod
def deserialize(cls, byts, protocol_version):
if not cls.subtypes:
raise NotImplementedError("can't deserialize unparameterized %s"
% cls.typename)
return cls.deserialize_safe(byts, protocol_version)
@classmethod
def serialize(cls, val, protocol_version):
if not cls.subtypes:
raise NotImplementedError("can't serialize unparameterized %s"
% cls.typename)
return cls.serialize_safe(val, protocol_version)
class _SimpleParameterizedType(_ParameterizedType):
@classmethod
def deserialize_safe(cls, byts, protocol_version):
subtype, = cls.subtypes
if protocol_version >= 3:
unpack = int32_unpack
length = 4
else:
unpack = uint16_unpack
length = 2
numelements = unpack(byts[:length])
p = length
result = []
inner_proto = max(3, protocol_version)
for _ in range(numelements):
itemlen = unpack(byts[p:p + length])
p += length
if itemlen < 0:
result.append(None)
else:
item = byts[p:p + itemlen]
p += itemlen
result.append(subtype.from_binary(item, inner_proto))
return cls.adapter(result)
@classmethod
def serialize_safe(cls, items, protocol_version):
if isinstance(items, six.string_types):
raise TypeError("Received a string for a type that expects a sequence")
subtype, = cls.subtypes
pack = int32_pack if protocol_version >= 3 else uint16_pack
buf = io.BytesIO()
buf.write(pack(len(items)))
inner_proto = max(3, protocol_version)
for item in items:
itembytes = subtype.to_binary(item, inner_proto)
buf.write(pack(len(itembytes)))
buf.write(itembytes)
return buf.getvalue()
class ListType(_SimpleParameterizedType):
typename = 'list'
num_subtypes = 1
adapter = list
class SetType(_SimpleParameterizedType):
typename = 'set'
num_subtypes = 1
adapter = util.sortedset
class MapType(_ParameterizedType):
typename = 'map'
num_subtypes = 2
@classmethod
def deserialize_safe(cls, byts, protocol_version):
key_type, value_type = cls.subtypes
if protocol_version >= 3:
unpack = int32_unpack
length = 4
else:
unpack = uint16_unpack
length = 2
numelements = unpack(byts[:length])
p = length
themap = util.OrderedMapSerializedKey(key_type, protocol_version)
inner_proto = max(3, protocol_version)
for _ in range(numelements):
key_len = unpack(byts[p:p + length])
p += length
if key_len < 0:
keybytes = None
key = None
else:
keybytes = byts[p:p + key_len]
p += key_len
key = key_type.from_binary(keybytes, inner_proto)
val_len = unpack(byts[p:p + length])
p += length
if val_len < 0:
val = None
else:
valbytes = byts[p:p + val_len]
p += val_len
val = value_type.from_binary(valbytes, inner_proto)
themap._insert_unchecked(key, keybytes, val)
return themap
@classmethod
def serialize_safe(cls, themap, protocol_version):
key_type, value_type = cls.subtypes
pack = int32_pack if protocol_version >= 3 else uint16_pack
buf = io.BytesIO()
buf.write(pack(len(themap)))
try:
items = six.iteritems(themap)
except AttributeError:
raise TypeError("Got a non-map object for a map value")
inner_proto = max(3, protocol_version)
for key, val in items:
keybytes = key_type.to_binary(key, inner_proto)
valbytes = value_type.to_binary(val, inner_proto)
buf.write(pack(len(keybytes)))
buf.write(keybytes)
buf.write(pack(len(valbytes)))
buf.write(valbytes)
return buf.getvalue()
class TupleType(_ParameterizedType):
typename = 'tuple'
@classmethod
def deserialize_safe(cls, byts, protocol_version):
proto_version = max(3, protocol_version)
p = 0
values = []
for col_type in cls.subtypes:
if p == len(byts):
break
itemlen = int32_unpack(byts[p:p + 4])
p += 4
if itemlen >= 0:
item = byts[p:p + itemlen]
p += itemlen
else:
item = None
# collections inside UDTs are always encoded with at least the
# version 3 format
values.append(col_type.from_binary(item, proto_version))
if len(values) < len(cls.subtypes):
nones = [None] * (len(cls.subtypes) - len(values))
values = values + nones
return tuple(values)
@classmethod
def serialize_safe(cls, val, protocol_version):
if len(val) > len(cls.subtypes):
raise ValueError("Expected %d items in a tuple, but got %d: %s" %
(len(cls.subtypes), len(val), val))
proto_version = max(3, protocol_version)
buf = io.BytesIO()
for item, subtype in zip(val, cls.subtypes):
if item is not None:
packed_item = subtype.to_binary(item, proto_version)
buf.write(int32_pack(len(packed_item)))
buf.write(packed_item)
else:
buf.write(int32_pack(-1))
return buf.getvalue()
@classmethod
def cql_parameterized_type(cls):
subtypes_string = ', '.join(sub.cql_parameterized_type() for sub in cls.subtypes)
return 'frozen<tuple<%s>>' % (subtypes_string,)
class UserType(TupleType):
typename = "org.apache.cassandra.db.marshal.UserType"
_cache = {}
_module = sys.modules[__name__]
@classmethod
def make_udt_class(cls, keyspace, udt_name, field_names, field_types):
assert len(field_names) == len(field_types)
if six.PY2 and isinstance(udt_name, unicode):
udt_name = udt_name.encode('utf-8')
instance = cls._cache.get((keyspace, udt_name))
if not instance or instance.fieldnames != field_names or instance.subtypes != field_types:
instance = type(udt_name, (cls,), {'subtypes': field_types,
'cassname': cls.cassname,
'typename': udt_name,
'fieldnames': field_names,
'keyspace': keyspace,
'mapped_class': None,
'tuple_type': cls._make_registered_udt_namedtuple(keyspace, udt_name, field_names)})
cls._cache[(keyspace, udt_name)] = instance
return instance
@classmethod
def evict_udt_class(cls, keyspace, udt_name):
if six.PY2 and isinstance(udt_name, unicode):
udt_name = udt_name.encode('utf-8')
try:
del cls._cache[(keyspace, udt_name)]
except KeyError:
pass
@classmethod
def apply_parameters(cls, subtypes, names):
keyspace = subtypes[0].cass_parameterized_type() # when parsed from cassandra type, the keyspace is created as an unrecognized cass type; This gets the name back
udt_name = _name_from_hex_string(subtypes[1].cassname)
field_names = tuple(_name_from_hex_string(encoded_name) for encoded_name in names[2:]) # using tuple here to match what comes into make_udt_class from other sources (for caching equality test)
return cls.make_udt_class(keyspace, udt_name, field_names, tuple(subtypes[2:]))
@classmethod
def cql_parameterized_type(cls):
return "frozen<%s>" % (cls.typename,)
@classmethod
def deserialize_safe(cls, byts, protocol_version):
values = super(UserType, cls).deserialize_safe(byts, protocol_version)
if cls.mapped_class:
return cls.mapped_class(**dict(zip(cls.fieldnames, values)))
elif cls.tuple_type:
return cls.tuple_type(*values)
else:
return tuple(values)
@classmethod
def serialize_safe(cls, val, protocol_version):
proto_version = max(3, protocol_version)
buf = io.BytesIO()
for i, (fieldname, subtype) in enumerate(zip(cls.fieldnames, cls.subtypes)):
# first treat as a tuple, else by custom type
try:
item = val[i]
except TypeError:
item = getattr(val, fieldname)
if item is not None:
packed_item = subtype.to_binary(item, proto_version)
buf.write(int32_pack(len(packed_item)))
buf.write(packed_item)
else:
buf.write(int32_pack(-1))
return buf.getvalue()
@classmethod
def _make_registered_udt_namedtuple(cls, keyspace, name, field_names):
# this is required to make the type resolvable via this module...
# required when unregistered udts are pickled for use as keys in
# util.OrderedMap
t = cls._make_udt_tuple_type(name, field_names)
if t:
qualified_name = "%s_%s" % (keyspace, name)
setattr(cls._module, qualified_name, t)
return t
@classmethod
def _make_udt_tuple_type(cls, name, field_names):
# fallback to positional named, then unnamed tuples
# for CQL identifiers that aren't valid in Python,
try:
t = namedtuple(name, field_names)
except ValueError:
try:
t = namedtuple(name, util._positional_rename_invalid_identifiers(field_names))
log.warning("could not create a namedtuple for '%s' because one or more "
"field names are not valid Python identifiers (%s); "
"returning positionally-named fields" % (name, field_names))
except ValueError:
t = None
log.warning("could not create a namedtuple for '%s' because the name is "
"not a valid Python identifier; will return tuples in "
"its place" % (name,))
return t
class CompositeType(_ParameterizedType):
typename = "org.apache.cassandra.db.marshal.CompositeType"
@classmethod
def cql_parameterized_type(cls):
"""
There is no CQL notation for Composites, so we override this.
"""
typestring = cls.cass_parameterized_type(full=True)
return "'%s'" % (typestring,)
@classmethod
def deserialize_safe(cls, byts, protocol_version):
result = []
for subtype in cls.subtypes:
if not byts:
# CompositeType can have missing elements at the end
break
element_length = uint16_unpack(byts[:2])
element = byts[2:2 + element_length]
# skip element length, element, and the EOC (one byte)
byts = byts[2 + element_length + 1:]
result.append(subtype.from_binary(element, protocol_version))
return tuple(result)
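    # Editor-added sketch (not part of the original driver source): based on
    # deserialize_safe above, each component of a serialized CompositeType is
    #   2 bytes  element length  (read with uint16_unpack)
    #   N bytes  element payload (decoded with the matching subtype)
    #   1 byte   end-of-component marker (skipped here)
    # so, for example, a two-component composite of the UTF-8 strings "ab" and
    # "cd" would occupy (2 + 2 + 1) * 2 = 10 bytes.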
class DynamicCompositeType(_ParameterizedType):
typename = "org.apache.cassandra.db.marshal.DynamicCompositeType"
@classmethod
def cql_parameterized_type(cls):
sublist = ', '.join('%s=>%s' % (alias, typ.cass_parameterized_type(full=True)) for alias, typ in zip(cls.fieldnames, cls.subtypes))
return "'%s(%s)'" % (cls.typename, sublist)
class ColumnToCollectionType(_ParameterizedType):
"""
This class only really exists so that we can cleanly evaluate types when
Cassandra includes this. We don't actually need or want the extra
information.
"""
typename = "org.apache.cassandra.db.marshal.ColumnToCollectionType"
class ReversedType(_ParameterizedType):
typename = "org.apache.cassandra.db.marshal.ReversedType"
num_subtypes = 1
@classmethod
def deserialize_safe(cls, byts, protocol_version):
subtype, = cls.subtypes
return subtype.from_binary(byts, protocol_version)
@classmethod
def serialize_safe(cls, val, protocol_version):
subtype, = cls.subtypes
return subtype.to_binary(val, protocol_version)
class FrozenType(_ParameterizedType):
typename = "frozen"
num_subtypes = 1
@classmethod
def deserialize_safe(cls, byts, protocol_version):
subtype, = cls.subtypes
return subtype.from_binary(byts, protocol_version)
@classmethod
def serialize_safe(cls, val, protocol_version):
subtype, = cls.subtypes
return subtype.to_binary(val, protocol_version)
def is_counter_type(t):
if isinstance(t, six.string_types):
t = lookup_casstype(t)
return issubclass(t, CounterColumnType)
def cql_typename(casstypename):
"""
Translate a Cassandra-style type specifier (optionally-fully-distinguished
Java class names for data types, along with optional parameters) into a
CQL-style type specifier.
>>> cql_typename('DateType')
'timestamp'
>>> cql_typename('org.apache.cassandra.db.marshal.ListType(IntegerType)')
'list<varint>'
"""
return lookup_casstype(casstypename).cql_parameterized_type()
class WKBGeometryType(object):
POINT = 1
LINESTRING = 2
POLYGON = 3
class PointType(CassandraType):
typename = 'PointType'
_type = struct.pack('<BI', _little_endian_flag, WKBGeometryType.POINT)
@staticmethod
def serialize(val, protocol_version):
return PointType._type + point_le.pack(val.x, val.y)
@staticmethod
def deserialize(byts, protocol_version):
is_little_endian = bool(_ord(byts[0]))
point = point_le if is_little_endian else point_be
return util.Point(*point.unpack_from(byts, 5)) # ofs = endian byte + int type
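    # Editor-added sketch (illustrative only): serialize/deserialize above use a
    # minimal WKB-style layout for points, assuming the helpers defined earlier:
    #   1 byte   endianness flag (_little_endian_flag)
    #   4 bytes  geometry type   (WKBGeometryType.POINT == 1)
    #   8 bytes  x as a double, then 8 bytes y as a double
    # which is why deserialize starts unpacking the coordinates at offset 5.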
class LineStringType(CassandraType):
typename = 'LineStringType'
_type = struct.pack('<BI', _little_endian_flag, WKBGeometryType.LINESTRING)
@staticmethod
def serialize(val, protocol_version):
num_points = len(val.coords)
return LineStringType._type + struct.pack('<I' + 'dd' * num_points, num_points, *(d for coords in val.coords for d in coords))
@staticmethod
def deserialize(byts, protocol_version):
is_little_endian = bool(_ord(byts[0]))
point = point_le if is_little_endian else point_be
coords = ((point.unpack_from(byts, offset) for offset in range(1 + 4 + 4, len(byts), point.size))) # start = endian + int type + int count
return util.LineString(coords)
class PolygonType(CassandraType):
typename = 'PolygonType'
_type = struct.pack('<BI', _little_endian_flag, WKBGeometryType.POLYGON)
_ring_count = struct.Struct('<I').pack
@staticmethod
def serialize(val, protocol_version):
buf = io.BytesIO(PolygonType._type)
buf.seek(0, 2)
if val.exterior.coords:
num_rings = 1 + len(val.interiors)
buf.write(PolygonType._ring_count(num_rings))
for ring in chain((val.exterior,), val.interiors):
num_points = len(ring.coords)
buf.write(struct.pack('<I' + 'dd' * num_points, num_points, *(d for coord in ring.coords for d in coord)))
else:
buf.write(PolygonType._ring_count(0))
return buf.getvalue()
@staticmethod
def deserialize(byts, protocol_version):
is_little_endian = bool(_ord(byts[0]))
if is_little_endian:
int_fmt = '<i'
point = point_le
else:
int_fmt = '>i'
point = point_be
p = 5
ring_count = struct.unpack_from(int_fmt, byts, p)[0]
p += 4
rings = []
for _ in range(ring_count):
point_count = struct.unpack_from(int_fmt, byts, p)[0]
p += 4
end = p + point_count * point.size
rings.append([point.unpack_from(byts, offset) for offset in range(p, end, point.size)])
p = end
return util.Polygon(exterior=rings[0], interiors=rings[1:]) if rings else util.Polygon()
class BoundKind(object):
"""
"Enum" representing the 6 possible DateRangeTypes
"""
SINGLE_DATE = 'SINGLE_DATE'
CLOSED_RANGE = 'CLOSED_RANGE'
OPEN_RANGE_HIGH = 'OPEN_RANGE_HIGH'
OPEN_RANGE_LOW = 'OPEN_RANGE_LOW'
BOTH_OPEN_RANGE = 'BOTH_OPEN_RANGE'
SINGLE_DATE_OPEN = 'SINGLE_DATE_OPEN'
VALID_RANGE_BOUNDS = (SINGLE_DATE, CLOSED_RANGE, OPEN_RANGE_HIGH,
OPEN_RANGE_LOW, BOTH_OPEN_RANGE,
SINGLE_DATE_OPEN)
_bound_str_to_int_map = {
'SINGLE_DATE': 0,
'CLOSED_RANGE': 1,
'OPEN_RANGE_HIGH': 2,
'OPEN_RANGE_LOW': 3,
'BOTH_OPEN_RANGE': 4,
'SINGLE_DATE_OPEN': 5,
}
    # invert the mapping so serialized ints can be decoded back to bound names
    _bound_int_to_str_map = {i: s for s, i in
                             six.iteritems(_bound_str_to_int_map)}
@classmethod
def to_int(cls, bound_str):
"""
Encode a string as an int for serialization.
"""
return cls._bound_str_to_int_map[bound_str.upper()]
@classmethod
def to_str(cls, bound_int):
"""
Decode an int to a string for deserialization.
"""
        return cls._bound_int_to_str_map[bound_int]
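    # Editor-added example (hedged, derived from the two maps above):
    #   BoundKind.to_int('closed_range') == 1
    #   BoundKind.to_str(1) == 'CLOSED_RANGE'
    # i.e. to_int and to_str are inverses over VALID_RANGE_BOUNDS.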
class DateRangeType(CassandraType):
typename = 'daterange'
_precision_str_to_int_map = {
'YEAR': 0,
'MONTH': 1,
'DAY': 2,
'HOUR': 3,
'MINUTE': 4,
'SECOND': 5,
'MILLISECOND': 6
}
_precision_int_to_str_map = {s: i for i, s in
six.iteritems(_precision_str_to_int_map)}
@classmethod
def _encode_precision(cls, precision_str):
normalized_str = precision_str.upper()
if normalized_str not in cls._precision_str_to_int_map:
raise ValueError(
'%s is not a valid DateRange precision string. Valid values: %s' %
(repr(precision_str), ', '.join(list(cls._precision_str_to_int_map)))
)
return cls._precision_str_to_int_map[normalized_str]
@classmethod
def _decode_precision(cls, precision_int):
if precision_int not in cls._precision_int_to_str_map:
raise ValueError(
'%s not a valid DateRange precision int. Valid values: %s' %
(precision_int, ', '.join([str(i) for i in cls._precision_int_to_str_map]))
)
return cls._precision_int_to_str_map[precision_int]
@classmethod
def deserialize(cls, byts, protocol_version):
# <type>[<time0><precision0>[<time1><precision1>]]
type_ = int8_unpack(byts[0:1])
if type_ in (BoundKind.to_int(BoundKind.BOTH_OPEN_RANGE),
BoundKind.to_int(BoundKind.SINGLE_DATE_OPEN)):
time0 = precision0 = None
else:
time0 = int64_unpack(byts[1:9])
precision0 = int8_unpack(byts[9:10])
if type_ == BoundKind.to_int(BoundKind.CLOSED_RANGE):
time1 = int64_unpack(byts[10:18])
precision1 = int8_unpack(byts[18:19])
else:
time1 = precision1 = None
if time0 is not None:
date_range_bound0 = util.DateRangeBound(
time0,
cls._decode_precision(precision0)
)
if time1 is not None:
date_range_bound1 = util.DateRangeBound(
time1,
cls._decode_precision(precision1)
)
if type_ == BoundKind.to_int(BoundKind.SINGLE_DATE):
return util.DateRange(value=date_range_bound0)
if type_ == BoundKind.to_int(BoundKind.CLOSED_RANGE):
return util.DateRange(lower_bound=date_range_bound0,
upper_bound=date_range_bound1)
if type_ == BoundKind.to_int(BoundKind.OPEN_RANGE_HIGH):
return util.DateRange(lower_bound=date_range_bound0,
upper_bound=util.OPEN_BOUND)
if type_ == BoundKind.to_int(BoundKind.OPEN_RANGE_LOW):
return util.DateRange(lower_bound=util.OPEN_BOUND,
upper_bound=date_range_bound0)
if type_ == BoundKind.to_int(BoundKind.BOTH_OPEN_RANGE):
return util.DateRange(lower_bound=util.OPEN_BOUND,
upper_bound=util.OPEN_BOUND)
if type_ == BoundKind.to_int(BoundKind.SINGLE_DATE_OPEN):
return util.DateRange(value=util.OPEN_BOUND)
raise ValueError('Could not deserialize %r' % (byts,))
@classmethod
def serialize(cls, v, protocol_version):
buf = io.BytesIO()
bound_kind, bounds = None, ()
try:
value = v.value
except AttributeError:
raise ValueError(
'%s.serialize expects an object with a value attribute; got'
'%r' % (cls.__name__, v)
)
if value is None:
try:
lower_bound, upper_bound = v.lower_bound, v.upper_bound
except AttributeError:
raise ValueError(
'%s.serialize expects an object with lower_bound and '
'upper_bound attributes; got %r' % (cls.__name__, v)
)
if lower_bound == util.OPEN_BOUND and upper_bound == util.OPEN_BOUND:
bound_kind = BoundKind.BOTH_OPEN_RANGE
elif lower_bound == util.OPEN_BOUND:
bound_kind = BoundKind.OPEN_RANGE_LOW
bounds = (upper_bound,)
elif upper_bound == util.OPEN_BOUND:
bound_kind = BoundKind.OPEN_RANGE_HIGH
bounds = (lower_bound,)
else:
bound_kind = BoundKind.CLOSED_RANGE
bounds = lower_bound, upper_bound
else: # value is not None
if value == util.OPEN_BOUND:
bound_kind = BoundKind.SINGLE_DATE_OPEN
else:
bound_kind = BoundKind.SINGLE_DATE
bounds = (value,)
if bound_kind is None:
raise ValueError(
'Cannot serialize %r; could not find bound kind' % (v,)
)
buf.write(int8_pack(BoundKind.to_int(bound_kind)))
for bound in bounds:
buf.write(int64_pack(bound.milliseconds))
buf.write(int8_pack(cls._encode_precision(bound.precision)))
return buf.getvalue()
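    # Editor-added byte-layout sketch, inferred from the serialize/deserialize pair
    # above rather than from any external spec: a CLOSED_RANGE DateRange is encoded as
    #   1 byte   bound kind        (int8, BoundKind.to_int('CLOSED_RANGE') == 1)
    #   8 bytes  lower bound time  (int64 milliseconds)
    #   1 byte   lower precision   (int8, e.g. _encode_precision('DAY') == 2)
    #   8 bytes  upper bound time  (int64 milliseconds)
    #   1 byte   upper precision   (int8)
    # 19 bytes in total, matching the byts[10:18] and byts[18:19] slices in deserialize.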
| 33.305478 | 201 | 0.62068 |
7946a6416f519c5f58c1ab8b925fb3e041cb396f | 8,371 | py | Python | docs/conf.py | natteruw/worms | 6530505d4fca3229bb93738a0bae0463a17229b8 | ["Apache-2.0"] | null | null | null | docs/conf.py | natteruw/worms | 6530505d4fca3229bb93738a0bae0463a17229b8 | ["Apache-2.0"] | null | null | null | docs/conf.py | natteruw/worms | 6530505d4fca3229bb93738a0bae0463a17229b8 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# worms documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import worms
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'worms'
copyright = u"2018, Will Sheffler"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = worms.__version__
# The full version, including alpha/beta/rc tags.
release = worms.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = [] # ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'wormsdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'worms.tex',
u'worms Documentation',
u'Will Sheffler', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'worms',
u'worms Documentation',
[u'Will Sheffler'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'worms',
u'worms Documentation',
u'Will Sheffler',
'worms',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 30.32971 | 76 | 0.713774 |
7946a9adf00a8ac19b3e2287ecbd9131cd462281 | 3,296 | py | Python | test/test_generate/test_autograder.py | afspies/otter-grader | 5d5ba1b049596eaee1fd9c039bb1cd15f6786f4f | ["BSD-3-Clause"] | 1 | 2021-10-01T02:02:55.000Z | 2021-10-01T02:02:55.000Z | test/test_generate/test_autograder.py | afspies/otter-grader | 5d5ba1b049596eaee1fd9c039bb1cd15f6786f4f | ["BSD-3-Clause"] | null | null | null | test/test_generate/test_autograder.py | afspies/otter-grader | 5d5ba1b049596eaee1fd9c039bb1cd15f6786f4f | ["BSD-3-Clause"] | null | null | null |
####################################
##### Tests for otter generate #####
####################################
import json
import os
import shutil
import subprocess
import unittest
from glob import glob
from subprocess import PIPE
from unittest import mock
from otter.generate import main as generate
from .. import TestCase
TEST_FILES_PATH = "test/test_generate/test-autograder/"
class TestAutograder(TestCase):
def test_autograder(self):
"""
Check that the correct zipfile is created by gs_generator.py
"""
# create the zipfile
generate(
tests_dir = TEST_FILES_PATH + "tests",
output_path = TEST_FILES_PATH + "autograder.zip",
requirements = TEST_FILES_PATH + "requirements.txt",
files = [TEST_FILES_PATH + "data/test-df.csv"],
no_environment = True, # don't use the environment.yml in the root of the repo
)
with self.unzip_to_temp(TEST_FILES_PATH + "autograder.zip", delete=True) as unzipped_dir:
self.assertDirsEqual(unzipped_dir, TEST_FILES_PATH + "autograder-correct")
def test_autograder_with_token(self):
"""
        Checks otter generate with a token specified instead of a username and password.
"""
# create the zipfile
with mock.patch("otter.generate.APIClient") as mocked_client:
generate(
tests_dir = TEST_FILES_PATH + "tests",
output_path = TEST_FILES_PATH + "autograder.zip",
requirements = TEST_FILES_PATH + "requirements.txt",
config = TEST_FILES_PATH + "otter_config.json",
files = [TEST_FILES_PATH + "data/test-df.csv"],
no_environment = True, # don't use the environment.yml in the root of the repo
)
mocked_client.assert_not_called()
with self.unzip_to_temp(TEST_FILES_PATH + "autograder.zip", delete=True) as unzipped_dir:
self.assertDirsEqual(unzipped_dir, TEST_FILES_PATH + "autograder-token-correct")
def test_custom_env(self):
"""
Check that a custom environment.yml is correctly read and modified
"""
# create the zipfile
generate(
tests_dir = TEST_FILES_PATH + "tests",
output_path = TEST_FILES_PATH + "autograder.zip",
requirements = TEST_FILES_PATH + "requirements.txt",
environment = TEST_FILES_PATH + "environment.yml",
files = [TEST_FILES_PATH + "data/test-df.csv"],
)
with self.unzip_to_temp(TEST_FILES_PATH + "autograder.zip", delete=True) as unzipped_dir:
self.assertDirsEqual(unzipped_dir, TEST_FILES_PATH + "autograder-custom-env")
def test_lang_r(self):
"""
Check that a correct R autograder is built
"""
# create the zipfile
generate(
tests_dir = TEST_FILES_PATH + "tests",
output_path = TEST_FILES_PATH + "autograder.zip",
config = TEST_FILES_PATH + "r_otter_config.json",
no_environment = True,
)
with self.unzip_to_temp(TEST_FILES_PATH + "autograder.zip", delete=True) as unzipped_dir:
self.assertDirsEqual(unzipped_dir, TEST_FILES_PATH + "autograder-r-correct")
| 36.622222 | 97 | 0.622573 |
7946a9fe51587fb83d713e05d8e400391b306a17 | 30,690 | py | Python | addons/blender_mmd_tools-main/tests/test_pmx_exporter.py | V-Sekai/V-Sekai-Blender-tools | 3473ad4abb737756290a9007273519460742960d | ["MIT"] | 2 | 2021-12-21T16:38:58.000Z | 2022-01-08T00:56:35.000Z | addons/blender_mmd_tools-main/tests/test_pmx_exporter.py | V-Sekai/V-Sekai-Blender-game-tools | 3473ad4abb737756290a9007273519460742960d | ["MIT"] | 1 | 2022-01-29T05:46:50.000Z | 2022-01-29T05:46:50.000Z | addons/blender_mmd_tools-main/tests/test_pmx_exporter.py | V-Sekai/V-Sekai-Blender-game-tools | 3473ad4abb737756290a9007273519460742960d | ["MIT"] | 1 | 2021-11-07T19:41:34.000Z | 2021-11-07T19:41:34.000Z |
# -*- coding: utf-8 -*-
import os
import shutil
import unittest
import bpy
from math import pi
from mathutils import Euler
from mathutils import Vector
from mmd_tools.core import pmx
from mmd_tools.core.model import Model
from mmd_tools.core.pmd.importer import import_pmd_to_pmx
from mmd_tools.core.pmx.importer import PMXImporter
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
SAMPLES_DIR = os.path.join(os.path.dirname(TESTS_DIR), 'samples')
class TestPmxExporter(unittest.TestCase):
@classmethod
def setUpClass(cls):
'''
Clean up output from previous tests
'''
output_dir = os.path.join(TESTS_DIR, 'output')
for item in os.listdir(output_dir):
if item.endswith('.OUTPUT'):
continue # Skip the placeholder
item_fp = os.path.join(output_dir, item)
if os.path.isfile(item_fp):
os.remove(item_fp)
elif os.path.isdir(item_fp):
shutil.rmtree(item_fp)
def setUp(self):
'''
'''
import logging
logger = logging.getLogger()
logger.setLevel('ERROR')
#********************************************
# Utils
#********************************************
def __axis_error(self, axis0, axis1):
return (Vector(axis0).normalized() - Vector(axis1).normalized()).length
def __vector_error(self, vec0, vec1):
return (Vector(vec0) - Vector(vec1)).length
def __quaternion_error(self, quat0, quat1):
angle = quat0.rotation_difference(quat1).angle % pi
assert(angle >= 0)
return min(angle, pi-angle)
#********************************************
# Header & Informations
#********************************************
def __check_pmx_header_info(self, source_model, result_model, import_types):
'''
Test pmx model info, header
'''
# Informations ================
self.assertEqual(source_model.name, result_model.name)
self.assertEqual(source_model.name_e, result_model.name_e)
self.assertEqual(source_model.comment.replace('\r', ''), result_model.comment.replace('\r', ''))
self.assertEqual(source_model.comment_e.replace('\r', ''), result_model.comment_e.replace('\r', ''))
# Header ======================
if source_model.header:
source_header = source_model.header
result_header = result_model.header
self.assertEqual(source_header.sign, result_header.sign)
self.assertEqual(source_header.version, result_header.version)
self.assertEqual(source_header.encoding.index, result_header.encoding.index)
self.assertEqual(source_header.encoding.charset, result_header.encoding.charset)
if 'MESH' in import_types:
self.assertEqual(source_header.additional_uvs, result_header.additional_uvs)
self.assertEqual(source_header.vertex_index_size, result_header.vertex_index_size)
self.assertEqual(source_header.texture_index_size, result_header.texture_index_size)
self.assertEqual(source_header.material_index_size, result_header.material_index_size)
if 'ARMATURE' in import_types:
self.assertEqual(source_header.bone_index_size, result_header.bone_index_size)
if 'MORPHS' in import_types:
self.assertEqual(source_header.morph_index_size, result_header.morph_index_size)
if 'PHYSICS' in import_types:
self.assertEqual(source_header.rigid_index_size, result_header.rigid_index_size)
#********************************************
# Mesh
#********************************************
def __get_pmx_textures(self, textures):
ret = []
for t in textures:
path = t.path
path = os.path.basename(path)
ret.append(path)
return ret
def __get_texture(self, tex_id, textures):
if 0 <= tex_id < len(textures):
return textures[tex_id]
return tex_id
def __get_toon_texture(self, tex_id, textures, is_shared):
return tex_id if is_shared else self.__get_texture(tex_id, textures)
def __check_pmx_mesh(self, source_model, result_model):
'''
Test pmx textures, materials, vertices, faces
'''
# textures ====================
# TODO
source_textures = self.__get_pmx_textures(source_model.textures)
result_textures = self.__get_pmx_textures(result_model.textures)
self.assertEqual(len(source_textures), len(result_textures))
for tex0, tex1 in zip(sorted(source_textures), sorted(result_textures)):
self.assertEqual(tex0, tex1)
# materials ===================
source_materials = source_model.materials
result_materials = result_model.materials
self.assertEqual(len(source_materials), len(result_materials))
for mat0, mat1 in zip(source_materials, result_materials):
msg = mat0.name
self.assertEqual(mat0.name, mat1.name, msg)
self.assertEqual(mat0.name_e, mat1.name_e, msg)
self.assertEqual(mat0.diffuse, mat1.diffuse, msg)
self.assertEqual(mat0.specular, mat1.specular, msg)
self.assertEqual(mat0.shininess, mat1.shininess, msg)
self.assertEqual(mat0.ambient, mat1.ambient, msg)
self.assertEqual(mat0.is_double_sided, mat1.is_double_sided, msg)
self.assertEqual(mat0.enabled_drop_shadow, mat1.enabled_drop_shadow, msg)
self.assertEqual(mat0.enabled_self_shadow_map, mat1.enabled_self_shadow_map, msg)
self.assertEqual(mat0.enabled_self_shadow, mat1.enabled_self_shadow, msg)
self.assertEqual(mat0.enabled_toon_edge, mat1.enabled_toon_edge, msg)
self.assertEqual(mat0.edge_color, mat1.edge_color, msg)
self.assertEqual(mat0.edge_size, mat1.edge_size, msg)
self.assertEqual(mat0.comment, mat1.comment, msg)
self.assertEqual(mat0.vertex_count, mat1.vertex_count, msg)
tex0 = self.__get_texture(mat0.texture, source_textures)
tex1 = self.__get_texture(mat1.texture, result_textures)
self.assertEqual(tex0, tex1, msg)
self.assertEqual(mat0.sphere_texture_mode, mat1.sphere_texture_mode, msg)
sph0 = self.__get_texture(mat0.sphere_texture, source_textures)
sph1 = self.__get_texture(mat1.sphere_texture, result_textures)
self.assertEqual(sph0, sph1, msg)
self.assertEqual(mat0.is_shared_toon_texture, mat1.is_shared_toon_texture, msg)
toon0 = self.__get_toon_texture(mat0.toon_texture, source_textures, mat0.is_shared_toon_texture)
toon1 = self.__get_toon_texture(mat1.toon_texture, result_textures, mat1.is_shared_toon_texture)
self.assertEqual(toon0, toon1, msg)
# vertices & faces ============
# TODO
source_vertices = source_model.vertices
result_vertices = result_model.vertices
#self.assertEqual(len(source_vertices), len(result_vertices))
source_faces = source_model.faces
result_faces = result_model.faces
self.assertEqual(len(source_faces), len(result_faces))
for f0, f1 in zip(source_faces, result_faces):
seq0 = [source_vertices[i] for i in f0]
seq1 = [result_vertices[i] for i in f1]
for v0, v1 in zip(seq0, seq1):
self.assertLess(self.__vector_error(v0.co, v1.co), 1e-6)
self.assertLess(self.__vector_error(v0.uv, v1.uv), 1e-6)
#self.assertLess(self.__vector_error(v0.normal, v1.normal), 1e-3)
self.assertEqual(v0.additional_uvs, v1.additional_uvs)
self.assertEqual(v0.edge_scale, v1.edge_scale)
#self.assertEqual(v0.weight.weights, v1.weight.weights)
#self.assertEqual(v0.weight.bones, v1.weight.bones)
#********************************************
# Armature
#********************************************
def __get_bone(self, bone_id, bones):
if bone_id is not None and 0 <= bone_id < len(bones):
return bones[bone_id]
return bone_id
def __get_bone_name(self, bone_id, bones):
if bone_id is not None and 0 <= bone_id < len(bones):
return bones[bone_id].name
return bone_id
def __get_bone_display_connection(self, bone, bones):
displayConnection = bone.displayConnection
if isinstance(displayConnection, int):
if displayConnection == -1:
return (0.0, 0.0, 0.0)
tail_bone = self.__get_bone(displayConnection, bones)
if self.__get_bone_name(tail_bone.parent, bones) == bone.name and not tail_bone.isMovable:
return tail_bone.name
return tuple(Vector(tail_bone.location) - Vector(bone.location))
return displayConnection
def __check_pmx_bones(self, source_model, result_model):
'''
Test pmx bones
'''
source_bones = source_model.bones
result_bones = result_model.bones
self.assertEqual(len(source_bones), len(result_bones))
# check bone order
bone_order0 = [x.name for x in source_bones]
bone_order1 = [x.name for x in result_bones]
self.assertEqual(bone_order0, bone_order1)
for bone0, bone1 in zip(source_bones, result_bones):
msg = bone0.name
self.assertEqual(bone0.name, bone1.name, msg)
self.assertEqual(bone0.name_e, bone1.name_e, msg)
self.assertLess(self.__vector_error(bone0.location, bone1.location), 1e-6, msg)
parent0 = self.__get_bone_name(bone0.parent, source_bones)
parent1 = self.__get_bone_name(bone1.parent, result_bones)
self.assertEqual(parent0, parent1, msg)
self.assertEqual(bone0.transform_order, bone1.transform_order, msg)
self.assertEqual(bone0.isRotatable, bone1.isRotatable, msg)
self.assertEqual(bone0.isMovable and not bone0.axis, bone1.isMovable, msg)
self.assertEqual(bone0.visible, bone1.visible, msg)
self.assertEqual(bone0.isControllable, bone1.isControllable, msg)
self.assertEqual(bone0.isIK, bone1.isIK, msg)
self.assertEqual(bone0.transAfterPhis, bone1.transAfterPhis, msg)
self.assertEqual(bone0.externalTransKey, bone1.externalTransKey, msg)
if bone0.axis and bone1.axis:
self.assertLess(self.__axis_error(bone0.axis, bone1.axis), 1e-6, msg)
else:
self.assertEqual(bone0.axis, bone1.axis, msg)
if bone0.localCoordinate and bone1.localCoordinate:
self.assertLess(self.__axis_error(bone0.localCoordinate.x_axis, bone1.localCoordinate.x_axis), 1e-6, msg)
self.assertLess(self.__axis_error(bone0.localCoordinate.z_axis, bone1.localCoordinate.z_axis), 1e-6, msg)
else:
self.assertEqual(bone0.localCoordinate, bone1.localCoordinate, msg)
self.assertEqual(bone0.hasAdditionalRotate, bone1.hasAdditionalRotate, msg)
self.assertEqual(bone0.hasAdditionalLocation, bone1.hasAdditionalLocation, msg)
if bone0.additionalTransform and bone1.additionalTransform:
at_target0, at_infl0 = bone0.additionalTransform
at_target1, at_infl1 = bone1.additionalTransform
at_target0 = self.__get_bone_name(at_target0, source_bones)
at_target1 = self.__get_bone_name(at_target1, result_bones)
self.assertEqual(at_target0, at_target1, msg)
self.assertLess(abs(at_infl0 - at_infl1), 1e-4, msg)
else:
self.assertEqual(bone0.additionalTransform, bone1.additionalTransform, msg)
target0 = self.__get_bone_name(bone0.target, source_bones)
target1 = self.__get_bone_name(bone1.target, result_bones)
self.assertEqual(target0, target1, msg)
self.assertEqual(bone0.loopCount, bone1.loopCount, msg)
self.assertEqual(bone0.rotationConstraint, bone1.rotationConstraint, msg)
self.assertEqual(len(bone0.ik_links), len(bone1.ik_links), msg)
for link0, link1 in zip(bone0.ik_links, bone1.ik_links):
target0 = self.__get_bone_name(link0.target, source_bones)
target1 = self.__get_bone_name(link1.target, result_bones)
self.assertEqual(target0, target1, msg)
maximumAngle0 = link0.maximumAngle
maximumAngle1 = link1.maximumAngle
if maximumAngle0 and maximumAngle1:
self.assertLess(self.__vector_error(maximumAngle0, maximumAngle1), 1e-6, msg)
else:
self.assertEqual(maximumAngle0, maximumAngle1, msg)
minimumAngle0 = link0.minimumAngle
minimumAngle1 = link1.minimumAngle
if minimumAngle0 and minimumAngle1:
self.assertLess(self.__vector_error(minimumAngle0, minimumAngle1), 1e-6, msg)
else:
self.assertEqual(minimumAngle0, minimumAngle1, msg)
for bone0, bone1 in zip(source_bones, result_bones):
msg = bone0.name
displayConnection0 = self.__get_bone_display_connection(bone0, source_bones)
displayConnection1 = self.__get_bone_display_connection(bone1, result_bones)
if not isinstance(displayConnection0, str) and not isinstance(displayConnection1, str):
self.assertLess(self.__vector_error(displayConnection0, displayConnection1), 1e-4, msg)
else:
self.assertEqual(displayConnection0, displayConnection1, msg)
#********************************************
# Physics
#********************************************
def __get_rigid_name(self, rigid_id, rigids):
if rigid_id is not None and 0 <= rigid_id < len(rigids):
return rigids[rigid_id].name
return rigid_id
def __check_pmx_physics(self, source_model, result_model):
'''
Test pmx rigids, joints
'''
# rigids ======================
source_rigids = source_model.rigids
result_rigids = result_model.rigids
self.assertEqual(len(source_rigids), len(result_rigids))
source_bones = source_model.bones
result_bones = result_model.bones
for rigid0, rigid1 in zip(source_rigids, result_rigids):
msg = rigid0.name
self.assertEqual(rigid0.name, rigid1.name, msg)
self.assertEqual(rigid0.name_e, rigid1.name_e, msg)
            bone0 = self.__get_bone_name(rigid0.bone, source_bones)
            bone1 = self.__get_bone_name(rigid1.bone, result_bones)
self.assertEqual(bone0, bone1, msg)
self.assertEqual(rigid0.collision_group_number, rigid1.collision_group_number, msg)
self.assertEqual(rigid0.collision_group_mask, rigid1.collision_group_mask, msg)
self.assertEqual(rigid0.type, rigid1.type, msg)
if rigid0.type == 0: # SHAPE_SPHERE
self.assertLess(abs(rigid0.size[0]-rigid1.size[0]), 1e-6, msg)
elif rigid0.type == 1: # SHAPE_BOX
self.assertLess(self.__vector_error(rigid0.size, rigid1.size), 1e-6, msg)
elif rigid0.type == 2: # SHAPE_CAPSULE
self.assertLess(self.__vector_error(rigid0.size[0:2], rigid1.size[0:2]), 1e-6, msg)
self.assertLess(self.__vector_error(rigid0.location, rigid1.location), 1e-6, msg)
rigid0_rotation = Euler(rigid0.rotation,'YXZ').to_quaternion()
rigid1_rotation = Euler(rigid1.rotation,'YXZ').to_quaternion()
self.assertLess(self.__quaternion_error(rigid0_rotation, rigid1_rotation), 1e-6, msg)
self.assertEqual(rigid0.mass, rigid1.mass, msg)
self.assertEqual(rigid0.velocity_attenuation, rigid1.velocity_attenuation, msg)
self.assertEqual(rigid0.rotation_attenuation, rigid1.rotation_attenuation, msg)
self.assertEqual(rigid0.bounce, rigid1.bounce, msg)
self.assertEqual(rigid0.friction, rigid1.friction, msg)
self.assertEqual(rigid0.mode, rigid1.mode, msg)
# joints ======================
source_joints = source_model.joints
result_joints = result_model.joints
self.assertEqual(len(source_joints), len(result_joints))
for joint0, joint1 in zip(source_joints, result_joints):
msg = joint0.name
self.assertEqual(joint0.name, joint1.name, msg)
self.assertEqual(joint0.name_e, joint1.name_e, msg)
self.assertEqual(joint0.mode, joint1.mode, msg)
src_rigid0 = self.__get_rigid_name(joint0.src_rigid, source_rigids)
src_rigid1 = self.__get_rigid_name(joint1.src_rigid, result_rigids)
self.assertEqual(src_rigid0, src_rigid1, msg)
dest_rigid0 = self.__get_rigid_name(joint0.dest_rigid, source_rigids)
dest_rigid1 = self.__get_rigid_name(joint1.dest_rigid, result_rigids)
self.assertEqual(dest_rigid0, dest_rigid1, msg)
self.assertEqual(joint0.location, joint1.location, msg)
joint0_rotation = Euler(joint0.rotation,'YXZ').to_quaternion()
joint1_rotation = Euler(joint1.rotation,'YXZ').to_quaternion()
self.assertLess(self.__quaternion_error(joint0_rotation, joint1_rotation), 1e-6, msg)
self.assertLess(self.__vector_error(joint0.maximum_location, joint1.maximum_location), 1e-6, msg)
self.assertLess(self.__vector_error(joint0.minimum_location, joint1.minimum_location), 1e-6, msg)
self.assertEqual(joint0.maximum_rotation, joint1.maximum_rotation, msg)
self.assertEqual(joint0.minimum_rotation, joint1.minimum_rotation, msg)
self.assertEqual(joint0.spring_constant, joint1.spring_constant, msg)
self.assertEqual(joint0.spring_rotation_constant, joint1.spring_rotation_constant, msg)
#********************************************
# Morphs
#********************************************
def __get_material(self, index, materials):
if 0 <= index < len(materials):
return materials[index]
class _dummy:
name = None
return _dummy
def __check_pmx_morphs(self, source_model, result_model):
'''
Test pmx morphs
'''
source_morphs = source_model.morphs
result_morphs = result_model.morphs
self.assertEqual(len(source_morphs), len(result_morphs))
source_table = {}
for m in source_morphs:
source_table.setdefault(type(m), []).append(m)
result_table = {}
for m in result_morphs:
result_table.setdefault(type(m), []).append(m)
self.assertEqual(source_table.keys(), result_table.keys(), 'types mismatch')
#source_vertices = source_model.vertices
#result_vertices = result_model.vertices
# VertexMorph =================
# TODO
source = source_table.get(pmx.VertexMorph, [])
result = result_table.get(pmx.VertexMorph, [])
self.assertEqual(len(source), len(result))
for m0, m1 in zip(source, result):
msg = 'VertexMorph %s'%m0.name
self.assertEqual(m0.name, m1.name, msg)
self.assertEqual(m0.name_e, m1.name_e, msg)
self.assertEqual(m0.category, m1.category, msg)
#self.assertEqual(len(m0.offsets), len(m1.offsets), msg)
# UVMorph =====================
# TODO
source = source_table.get(pmx.UVMorph, [])
result = result_table.get(pmx.UVMorph, [])
self.assertEqual(len(source), len(result))
for m0, m1 in zip(source, result):
msg = 'UVMorph %s'%m0.name
self.assertEqual(m0.name, m1.name, msg)
self.assertEqual(m0.name_e, m1.name_e, msg)
self.assertEqual(m0.category, m1.category, msg)
self.assertEqual(len(m0.offsets), len(m1.offsets), msg)
#for s0, s1 in zip(m0.offsets, m1.offsets):
# self.assertEqual(s0.index, s1.index, msg)
# self.assertEqual(s0.offset, s1.offset, msg)
# BoneMorph ===================
source_bones = source_model.bones
result_bones = result_model.bones
source = source_table.get(pmx.BoneMorph, [])
result = result_table.get(pmx.BoneMorph, [])
self.assertEqual(len(source), len(result))
for m0, m1 in zip(source, result):
msg = 'BoneMorph %s'%m0.name
self.assertEqual(m0.name, m1.name, msg)
self.assertEqual(m0.name_e, m1.name_e, msg)
self.assertEqual(m0.category, m1.category, msg)
# the source may contains invalid data
source_offsets = [m for m in m0.offsets if 0 <= m.index < len(source_bones)]
result_offsets = m1.offsets
self.assertEqual(len(source_offsets), len(result_offsets), msg)
for s0, s1 in zip(source_offsets, result_offsets):
bone0 = source_bones[s0.index]
bone1 = result_bones[s1.index]
self.assertEqual(bone0.name, bone1.name, msg)
self.assertLess(self.__vector_error(s0.location_offset, s1.location_offset), 1e-5, msg)
self.assertLess(self.__vector_error(s0.rotation_offset, s1.rotation_offset), 1e-5, msg)
# MaterialMorph ===============
source_materials = source_model.materials
result_materials = result_model.materials
source = source_table.get(pmx.MaterialMorph, [])
result = result_table.get(pmx.MaterialMorph, [])
self.assertEqual(len(source), len(result))
for m0, m1 in zip(source, result):
msg = 'MaterialMorph %s'%m0.name
self.assertEqual(m0.name, m1.name, msg)
self.assertEqual(m0.name_e, m1.name_e, msg)
self.assertEqual(m0.category, m1.category, msg)
source_offsets = m0.offsets
result_offsets = m1.offsets
self.assertEqual(len(source_offsets), len(result_offsets), msg)
for s0, s1 in zip(source_offsets, result_offsets):
mat0 = self.__get_material(s0.index, source_materials)
mat1 = self.__get_material(s1.index, result_materials)
self.assertEqual(mat0.name, mat1.name, msg)
self.assertEqual(s0.offset_type, s1.offset_type, msg)
self.assertEqual(s0.diffuse_offset, s1.diffuse_offset, msg)
self.assertEqual(s0.specular_offset, s1.specular_offset, msg)
self.assertEqual(s0.shininess_offset, s1.shininess_offset, msg)
self.assertEqual(s0.ambient_offset, s1.ambient_offset, msg)
self.assertEqual(s0.edge_color_offset, s1.edge_color_offset, msg)
self.assertEqual(s0.edge_size_offset, s1.edge_size_offset, msg)
self.assertEqual(s0.texture_factor, s1.texture_factor, msg)
self.assertEqual(s0.sphere_texture_factor, s1.sphere_texture_factor, msg)
self.assertEqual(s0.toon_texture_factor, s1.toon_texture_factor, msg)
# GroupMorph ==================
source = source_table.get(pmx.GroupMorph, [])
result = result_table.get(pmx.GroupMorph, [])
self.assertEqual(len(source), len(result))
for m0, m1 in zip(source, result):
msg = 'GroupMorph %s'%m0.name
self.assertEqual(m0.name, m1.name, msg)
self.assertEqual(m0.name_e, m1.name_e, msg)
self.assertEqual(m0.category, m1.category, msg)
# the source may contains invalid data
source_offsets = [m for m in m0.offsets if 0 <= m.morph < len(source_morphs)]
result_offsets = m1.offsets
self.assertEqual(len(source_offsets), len(result_offsets), msg)
for s0, s1 in zip(source_offsets, result_offsets):
morph0 = source_morphs[s0.morph]
morph1 = result_morphs[s1.morph]
self.assertEqual(morph0.name, morph1.name, msg)
self.assertEqual(morph0.category, morph1.category, msg)
self.assertEqual(s0.factor, s1.factor, msg)
#********************************************
# Display
#********************************************
def __check_pmx_display_data(self, source_model, result_model, check_morphs):
'''
Test pmx display
'''
source_display = source_model.display
result_display = result_model.display
self.assertEqual(len(source_display), len(result_display))
for source, result in zip(source_display, result_display):
self.assertEqual(source.name, result.name)
self.assertEqual(source.name_e, result.name_e)
self.assertEqual(source.isSpecial, result.isSpecial)
source_items = source.data
if not check_morphs:
source_items = [i for i in source_items if i[0] == 0]
result_items = result.data
self.assertEqual(len(source_items), len(result_items))
for item0, item1 in zip(source_items, result_items):
disp_type0, index0 = item0
disp_type1, index1 = item1
self.assertEqual(disp_type0, disp_type1)
if disp_type0 == 0:
bone_name0 = source_model.bones[index0].name
bone_name1 = result_model.bones[index1].name
self.assertEqual(bone_name0, bone_name1)
elif disp_type0 == 1:
morph0 = source_model.morphs[index0]
morph1 = result_model.morphs[index1]
self.assertEqual(morph0.name, morph1.name)
self.assertEqual(morph0.category, morph1.category)
#********************************************
# Test Function
#********************************************
def __get_import_types(self, types):
types = types.copy()
if 'PHYSICS' in types:
types.add('ARMATURE')
if 'DISPLAY' in types:
types.add('ARMATURE')
if 'MORPHS' in types:
types.add('ARMATURE')
types.add('MESH')
return types
def __list_sample_files(self, file_types):
ret = []
for file_type in file_types:
file_ext ='.' + file_type
for root, dirs, files in os.walk(os.path.join(SAMPLES_DIR, file_type)):
for name in files:
if name.lower().endswith(file_ext):
ret.append(os.path.join(root, name))
return ret
def __enable_mmd_tools(self):
bpy.ops.wm.read_homefile() # reload blender startup file
pref = getattr(bpy.context, 'preferences', None) or bpy.context.user_preferences
if not pref.addons.get('mmd_tools', None):
addon_enable = bpy.ops.wm.addon_enable if 'addon_enable' in dir(bpy.ops.wm) else bpy.ops.preferences.addon_enable
addon_enable(module='mmd_tools') # make sure addon 'mmd_tools' is enabled
def test_pmx_exporter(self):
'''
'''
input_files = self.__list_sample_files(('pmd', 'pmx'))
if len(input_files) < 1:
self.fail('required pmd/pmx sample file(s)!')
check_types = set()
check_types.add('MESH')
check_types.add('ARMATURE')
check_types.add('PHYSICS')
check_types.add('MORPHS')
check_types.add('DISPLAY')
import_types = self.__get_import_types(check_types)
print('\n Check: %s | Import: %s'%(str(check_types), str(import_types)))
for test_num, filepath in enumerate(input_files):
print('\n - %2d/%d | filepath: %s'%(test_num+1, len(input_files), filepath))
try:
self.__enable_mmd_tools()
file_loader = pmx.load
if filepath.lower().endswith('.pmd'):
file_loader = import_pmd_to_pmx
source_model = file_loader(filepath)
PMXImporter().execute(
pmx=source_model,
types=import_types,
scale=1,
clean_model=False,
)
#bpy.context.scene.update()
bpy.context.scene.frame_set(bpy.context.scene.frame_current)
except Exception:
self.fail('Exception happened during import %s'%filepath)
else:
try:
output_pmx = os.path.join(TESTS_DIR, 'output', '%d.pmx'%test_num)
bpy.ops.mmd_tools.export_pmx(
filepath=output_pmx,
scale=1,
copy_textures=False,
sort_materials=False,
log_level='ERROR',
)
except Exception:
self.fail('Exception happened during export %s'%output_pmx)
else:
self.assertTrue(os.path.isfile(output_pmx), 'File was not created') # Is this a race condition?
try:
result_model = pmx.load(output_pmx)
except:
self.fail('Failed to load output file %s'%output_pmx)
self.__check_pmx_header_info(source_model, result_model, import_types)
if 'MESH' in check_types:
self.__check_pmx_mesh(source_model, result_model)
if 'ARMATURE' in check_types:
self.__check_pmx_bones(source_model, result_model)
if 'PHYSICS' in check_types:
self.__check_pmx_physics(source_model, result_model)
if 'MORPHS' in check_types:
self.__check_pmx_morphs(source_model, result_model)
if 'DISPLAY' in check_types:
self.__check_pmx_display_data(source_model, result_model, 'MORPHS' in check_types)
if __name__ == '__main__':
import sys
sys.argv = [__file__] + (sys.argv[sys.argv.index('--') + 1:] if '--' in sys.argv else [])
unittest.main()
| 45.874439 | 125 | 0.60971 |
7946aa0c0eef12a1b63de123544f0ad719074538 | 2,653 | py | Python | wagtail/wagtailsnippets/edit_handlers.py | minervaproject/wagtail | b94b1179fbf2019d809fd69ed62cd1a096b5d407 | ["BSD-3-Clause"] | null | null | null | wagtail/wagtailsnippets/edit_handlers.py | minervaproject/wagtail | b94b1179fbf2019d809fd69ed62cd1a096b5d407 | ["BSD-3-Clause"] | 1 | 2016-04-20T00:54:15.000Z | 2016-04-20T00:54:15.000Z | wagtail/wagtailsnippets/edit_handlers.py | minervaproject/wagtail | b94b1179fbf2019d809fd69ed62cd1a096b5d407 | ["BSD-3-Clause"] | null | null | null |
from __future__ import absolute_import, unicode_literals
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from wagtail.utils.deprecation import RemovedInWagtail16Warning
from wagtail.wagtailadmin.edit_handlers import BaseChooserPanel
from wagtail.wagtailcore.utils import resolve_model_string
from .widgets import AdminSnippetChooser
class BaseSnippetChooserPanel(BaseChooserPanel):
object_type_name = 'item'
_target_model = None
@classmethod
def widget_overrides(cls):
return {cls.field_name: AdminSnippetChooser(model=cls.target_model())}
@classmethod
def target_model(cls):
if cls._target_model is None:
if cls.snippet_type:
# RemovedInWagtail16Warning: The target_model is automatically
# detected from the relation, so snippet_type is deprecated.
try:
cls._target_model = resolve_model_string(cls.snippet_type)
except LookupError:
raise ImproperlyConfigured(
"{0}.snippet_type must be of the form 'app_label.model_name', given {1!r}"
.format(cls.__name__, cls.snippet_type)
)
except ValueError:
raise ImproperlyConfigured(
"{0}.snippet_type refers to model {1!r} that has not been installed"
.format(cls.__name__, cls.snippet_type)
)
else:
cls._target_model = cls.model._meta.get_field(cls.field_name).rel.model
return cls._target_model
def render_as_field(self):
instance_obj = self.get_chosen_item()
return mark_safe(render_to_string(self.field_template, {
'field': self.bound_field,
self.object_type_name: instance_obj,
}))
class SnippetChooserPanel(object):
def __init__(self, field_name, snippet_type=None):
self.field_name = field_name
if snippet_type is not None:
warnings.warn(
'The snippet_type argument to SnippetChooserPanel is deprecated. '
'The related model is now automatically detected.',
RemovedInWagtail16Warning)
self.snippet_type = snippet_type
def bind_to_model(self, model):
return type(str('_SnippetChooserPanel'), (BaseSnippetChooserPanel,), {
'model': model,
'field_name': self.field_name,
'snippet_type': self.snippet_type,
})
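# Editor-added usage sketch (hedged, not part of the original module): a panel like
# this is typically referenced from a page model's panel list, e.g.
#
#     content_panels = Page.content_panels + [
#         SnippetChooserPanel('advert'),
#     ]
#
# where 'advert' is a hypothetical ForeignKey to a registered snippet model; the
# related model is then detected automatically via target_model() above.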
| 36.847222 | 98 | 0.648323 |
7946aae40299cf0f493e5e92bbe69db907942624 | 7,732 | py | Python | jaxnerf/nerf/models.py | muell-monster/google-research | 04d2024f4723bc4be3d639a668c19fb1f6a31478 | ["Apache-2.0"] | 1 | 2021-07-02T07:29:04.000Z | 2021-07-02T07:29:04.000Z | jaxnerf/nerf/models.py | thomascherickal/google-research | 294a888bbb6678ac255c6422fd703c325cbb0772 | ["Apache-2.0"] | null | null | null | jaxnerf/nerf/models.py | thomascherickal/google-research | 294a888bbb6678ac255c6422fd703c325cbb0772 | ["Apache-2.0"] | 1 | 2021-09-27T03:17:14.000Z | 2021-09-27T03:17:14.000Z |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Different model implementation plus a general port for all the models."""
from flax import nn
import jax.numpy as jnp
from jaxnerf.nerf import model_utils
def get_model(key, args):
return model_dict[args.model](key, args)
class NerfModel(nn.Module):
"""Nerf NN Model with both coarse and fine MLPs."""
def apply(self, key_0, key_1, rays, n_samples, n_fine_samples, use_viewdirs,
near, far, noise_std, net_depth, net_width, net_depth_condition,
net_width_condition, activation, skip_layer, alpha_channel,
rgb_channel, randomized, white_bkgd, deg_point, deg_view, lindisp):
"""Nerf Model.
Args:
key_0: jnp.ndarray, random number generator for coarse model sampling.
key_1: jnp.ndarray, random number generator for fine model sampling.
rays: jnp.ndarray(float32), [batch_size, 6/9], each ray is a 6-d vector
where the first 3 dimensions represent the ray origin and the last 3
dimensions represent the unormalized ray direction. Note that if ndc
rays are used, rays are 9-d where the extra 3-dimensional vector is the
view direction before transformed to ndc rays.
n_samples: int, the number of samples for coarse nerf.
n_fine_samples: int, the number of samples for fine nerf.
use_viewdirs: bool, use viewdirs as a condition.
near: float, near clip.
far: float, far clip.
noise_std: float, std dev of noise added to regularize sigma output.
net_depth: int, the depth of the first part of MLP.
net_width: int, the width of the first part of MLP.
net_depth_condition: int, the depth of the second part of MLP.
net_width_condition: int, the width of the second part of MLP.
activation: function, the activation function used in the MLP.
skip_layer: int, add a skip connection to the output vector of every
skip_layer layers.
alpha_channel: int, the number of alpha_channels.
rgb_channel: int, the number of rgb_channels.
randomized: bool, use randomized stratified sampling.
white_bkgd: bool, use white background.
deg_point: degree of positional encoding for positions.
deg_view: degree of positional encoding for viewdirs.
lindisp: bool, sampling linearly in disparity rather than depth if true.
Returns:
ret: list, [(rgb, disp, acc), (rgb_coarse, disp_coarse, acc_coarse)]
"""
# Extract viewdirs from the ray array
if rays.shape[-1] > 6: # viewdirs different from rays_d
viewdirs = rays[Ellipsis, -3:]
rays = rays[Ellipsis, :-3]
else: # viewdirs are normalized rays_d
viewdirs = rays[Ellipsis, 3:6]
# Stratified sampling along rays
z_vals, samples = model_utils.sample_along_rays(key_0, rays, n_samples,
near, far, randomized,
lindisp)
samples = model_utils.posenc(samples, deg_point)
# Point attribute predictions
if use_viewdirs:
norms = jnp.linalg.norm(viewdirs, axis=-1, keepdims=True)
viewdirs = viewdirs / norms
viewdirs = model_utils.posenc(viewdirs, deg_view)
raw = model_utils.MLP(
samples, viewdirs, net_depth=net_depth, net_width=net_width,
net_depth_condition=net_depth_condition,
net_width_condition=net_width_condition,
activation=activation, skip_layer=skip_layer,
alpha_channel=alpha_channel, rgb_channel=rgb_channel,
)
else:
raw = model_utils.MLP(
samples, net_depth=net_depth, net_width=net_width,
net_depth_condition=net_depth_condition,
net_width_condition=net_width_condition,
activation=activation, skip_layer=skip_layer,
alpha_channel=alpha_channel, rgb_channel=rgb_channel,
)
# Add noises to regularize the density predictions if needed
raw = model_utils.noise_regularize(key_0, raw, noise_std, randomized)
# Volumetric rendering.
rgb, disp, acc, weights = model_utils.volumetric_rendering(
raw,
z_vals,
rays[Ellipsis, 3:6],
white_bkgd=white_bkgd,
)
ret = [
(rgb, disp, acc),
]
# Hierarchical sampling based on coarse predictions
if n_fine_samples > 0:
z_vals_mid = .5 * (z_vals[Ellipsis, 1:] + z_vals[Ellipsis, :-1])
z_vals, samples = model_utils.sample_pdf(
key_1,
z_vals_mid,
weights[Ellipsis, 1:-1],
rays,
z_vals,
n_fine_samples,
randomized,
)
samples = model_utils.posenc(samples, deg_point)
if use_viewdirs:
raw = model_utils.MLP(samples, viewdirs)
else:
raw = model_utils.MLP(samples)
raw = model_utils.noise_regularize(key_1, raw, noise_std, randomized)
rgb, disp, acc, unused_weights = model_utils.volumetric_rendering(
raw,
z_vals,
rays[Ellipsis, 3:6],
white_bkgd=white_bkgd,
)
ret.append((rgb, disp, acc))
return ret
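# Editor-added usage sketch (hedged, not part of the original file): once `nerf()`
# below has produced `model, state = get_model(rng, args)` (args being the
# hyperparameter flags), a rendering step is roughly
#   ret = model(key_0, key_1, rays)   # rays: [batch, 6], or [batch, 9] for ndc rays
# where ret[0] is the coarse (rgb, disp, acc) triple and, when n_fine_samples > 0,
# ret[1] is the fine-network triple described in the docstring above.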
def nerf(key, args):
"""Neural Randiance Field.
Args:
key: jnp.ndarray. Random number generator.
args: FLAGS class. Hyperparameters of nerf.
Returns:
model: nn.Model. Nerf model with parameters.
state: flax.Module.state. Nerf model state for stateful parameters.
"""
deg_point = args.deg_point
deg_view = args.deg_view
n_samples = args.n_samples
n_fine_samples = args.n_fine_samples
use_viewdirs = args.use_viewdirs
near = args.near
far = args.far
noise_std = args.noise_std
randomized = args.randomized
white_bkgd = args.white_bkgd
net_depth = args.net_depth
net_width = args.net_width
net_depth_condition = args.net_depth_condition
net_width_condition = args.net_width_condition
if args.activation == "relu":
activation = nn.relu
else:
raise NotImplementedError("Invalid choice of activation {}".format(
args.activation))
skip_layer = args.skip_layer
alpha_channel = args.alpha_channel
rgb_channel = args.rgb_channel
lindisp = args.lindisp
ray_shape = (args.batch_size, 6 if args.dataset != "llff" else 9)
model_fn = NerfModel.partial(
n_samples=n_samples,
n_fine_samples=n_fine_samples,
use_viewdirs=use_viewdirs,
near=near,
far=far,
noise_std=noise_std,
net_depth=net_depth,
net_width=net_width,
net_depth_condition=net_depth_condition,
net_width_condition=net_width_condition,
activation=activation,
skip_layer=skip_layer,
alpha_channel=alpha_channel,
rgb_channel=rgb_channel,
randomized=randomized,
white_bkgd=white_bkgd,
deg_point=deg_point,
deg_view=deg_view,
lindisp=lindisp)
with nn.stateful() as init_state:
unused_outspec, init_params = model_fn.init_by_shape(
key,
[
(key.shape, key.dtype),
(key.shape, key.dtype),
(ray_shape, jnp.float32),
],
)
model = nn.Model(model_fn, init_params)
return model, init_state
model_dict = {
"nerf": nerf,
}
| 36.471698 | 79 | 0.678996 |
7946ab514da23177c8fa136ae47580349c8d5e7a | 19,752 | py | Python | networking_hyperv/neutron/agent/layer2.py | vaibhavjaiman/networking-hyperv | 1f6462a76e7c3f8fc57bf7b1d7d0fc3d39910935 | ["Apache-2.0"] | 14 | 2015-10-18T02:55:08.000Z | 2019-01-28T22:05:28.000Z | networking_hyperv/neutron/agent/layer2.py | vaibhavjaiman/networking-hyperv | 1f6462a76e7c3f8fc57bf7b1d7d0fc3d39910935 | ["Apache-2.0"] | 2 | 2021-03-01T06:13:30.000Z | 2021-05-09T06:13:28.000Z | networking_hyperv/neutron/agent/layer2.py | vaibhavjaiman/networking-hyperv | 1f6462a76e7c3f8fc57bf7b1d7d0fc3d39910935 | ["Apache-2.0"] | 18 | 2016-02-03T17:26:34.000Z | 2021-02-28T13:36:22.000Z |
# Copyright 2017 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module contains all the available contract classes."""
import abc
import collections
import re
import eventlet
from eventlet import tpool
from neutron.agent import rpc as agent_rpc
from neutron_lib.agent import topics
from neutron_lib import constants as n_const
from neutron_lib import rpc as n_rpc
from os_win import constants as os_win_const
from os_win import exceptions as os_win_exc
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_service import loopingcall
import six
from networking_hyperv.common.i18n import _, _LI, _LE # noqa
from networking_hyperv.neutron import _common_utils as c_util
from networking_hyperv.neutron.agent import base as base_agent
from networking_hyperv.neutron import config
from networking_hyperv.neutron import constants
from networking_hyperv.neutron import exception
LOG = logging.getLogger(__name__)
CONF = config.CONF
_synchronized = lockutils.synchronized_with_prefix('n-hv-agent-')
_port_synchronized = c_util.get_port_synchronized_decorator('n-hv-agent-')
class Layer2Agent(base_agent.BaseAgent):
"""Contract class for all the layer two agents."""
_AGENT_TOPIC = n_const.L2_AGENT_TOPIC
_OVS_EXT_NAME_RE = re.compile(r'.*((open.?v.?switch)|(ovs)).*',
re.IGNORECASE)
def __init__(self):
super(Layer2Agent, self).__init__()
self._network_vswitch_map = {}
# The following sets contain ports that are to be processed.
self._added_ports = set()
self._removed_ports = set()
# The following sets contain ports that have been processed.
self._bound_ports = set()
self._unbound_ports = set()
self._physical_network_mappings = collections.OrderedDict()
self._consumers = []
self._event_callback_pairs = []
# Setup the current agent.
self._setup()
self._set_agent_state()
self._setup_rpc()
def _setup(self):
"""Setup the layer two agent."""
agent_config = CONF.get("AGENT", {})
self._worker_count = agent_config.get('worker_count')
self._phys_net_map = agent_config.get(
'physical_network_vswitch_mappings', [])
self._local_network_vswitch = agent_config.get(
'local_network_vswitch')
self._load_physical_network_mappings(self._phys_net_map)
self._validate_vswitches()
self._endpoints.append(self)
self._event_callback_pairs.extend([
(self._utils.EVENT_TYPE_CREATE, self._process_added_port_event),
(self._utils.EVENT_TYPE_DELETE, self._process_removed_port_event)
])
tpool.set_num_threads(self._worker_count)
def _setup_qos_extension(self):
"""Setup the QOS extension if it is required."""
pass
def _setup_rpc(self):
"""Setup the RPC client for the current agent."""
self._plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
self._state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self._client = n_rpc.get_client(self.target)
self._consumers.extend([
[topics.PORT, topics.UPDATE], [topics.NETWORK, topics.DELETE],
[topics.PORT, topics.DELETE]
])
self._connection = agent_rpc.create_consumers(
self._endpoints, self._topic, self._consumers,
start_listening=False
)
self._setup_qos_extension()
self._connection.consume_in_threads()
report_interval = CONF.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def _process_added_port_event(self, port_name):
"""Callback for added ports."""
LOG.info("Hyper-V VM vNIC added: %s", port_name)
self._added_ports.add(port_name)
def _process_removed_port_event(self, port_name):
LOG.info("Hyper-V VM vNIC removed: %s", port_name)
self._removed_ports.add(port_name)
def _load_physical_network_mappings(self, phys_net_vswitch_mappings):
"""Load all the information regarding the physical network."""
for mapping in phys_net_vswitch_mappings:
parts = mapping.split(':')
if len(parts) != 2:
LOG.debug('Invalid physical network mapping: %s', mapping)
else:
pattern = re.escape(parts[0].strip()).replace('\\*', '.*')
pattern = pattern + '$'
vswitch = parts[1].strip()
self._physical_network_mappings[pattern] = vswitch
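    # Illustrative example (hypothetical mapping): an entry such as
    # 'physnet1*:external' is stored under the regex key 'physnet1.*$' with the
    # vswitch value 'external', so _get_vswitch_for_physical_network('physnet1-ex')
    # below resolves to 'external'.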
def _validate_vswitches(self):
vswitch_names = list(self._physical_network_mappings.values())
if self._local_network_vswitch:
vswitch_names.append(self._local_network_vswitch)
vswitches_valid = True
for vswitch_name in vswitch_names:
try:
self._validate_vswitch(vswitch_name)
except exception.ValidationError:
# We're validating all the vSwitches before erroring out.
LOG.error("Validating vSwitch %s failed", vswitch_name)
vswitches_valid = False
# We're currently stopping the service if any of the configured
# vSwitches are unavailable.
if not vswitches_valid:
err_msg = _("Validating one or more configured vSwitches failed.")
raise exception.ValidationError(err_msg)
elif not vswitch_names:
err_msg = _("No vSwitch configured.")
raise exception.ValidationError(err_msg)
def _validate_vswitch(self, vswitch_name):
try:
vswitch_extensions = self._utils.get_vswitch_extensions(
vswitch_name)
except os_win_exc.HyperVvSwitchNotFound as exc:
raise exception.ValidationError(exc.message)
for ext in vswitch_extensions:
if (self._is_ovs_extension(ext) and
ext['enabled_state'] == os_win_const.CIM_STATE_ENABLED):
err_msg = _("The Open vSwitch extension is enabled on the "
"'%s' vSwitch. For this reason, this agent "
"cannot use the specified vSwitch.")
raise exception.ValidationError(err_msg % vswitch_name)
def _is_ovs_extension(self, vswitch_extension):
# The OVS extension name keeps changing, while some vendors
# redistribute it under a different name.
return bool(self._OVS_EXT_NAME_RE.match(vswitch_extension['name']))
def _get_vswitch_name(self, network_type, physical_network):
"""Get the vswitch name for the received network information."""
if network_type != constants.TYPE_LOCAL:
vswitch_name = self._get_vswitch_for_physical_network(
physical_network)
else:
vswitch_name = self._local_network_vswitch
if vswitch_name:
return vswitch_name
err_msg = _("No vSwitch configured for physical network "
"'%(physical_network)s'. Neutron network type: "
"'%(network_type)s'.")
raise exception.NetworkingHyperVException(
err_msg % dict(physical_network=physical_network,
network_type=network_type))
def _get_vswitch_for_physical_network(self, phys_network_name):
"""Get the vswitch name for the received network name."""
for pattern in self._physical_network_mappings:
if phys_network_name is None:
phys_network_name = ''
if re.match(pattern, phys_network_name):
return self._physical_network_mappings[pattern]
def _get_network_vswitch_map_by_port_id(self, port_id):
"""Get the vswitch name for the received port id."""
for network_id, vswitch in six.iteritems(self._network_vswitch_map):
if port_id in vswitch['ports']:
return (network_id, vswitch)
# If the port was not found, just return (None, None)
return (None, None)
def _update_port_status_cache(self, device, device_bound=True):
"""Update the ports status cache."""
with self._cache_lock:
if device_bound:
self._bound_ports.add(device)
self._unbound_ports.discard(device)
else:
self._bound_ports.discard(device)
self._unbound_ports.add(device)
def _create_event_listeners(self):
"""Create and bind the event listeners."""
LOG.debug("Create the event listeners.")
for event_type, callback in self._event_callback_pairs:
LOG.debug("Create listener for %r event", event_type)
listener = self._utils.get_vnic_event_listener(event_type)
eventlet.spawn_n(listener, callback)
def _prologue(self):
"""Executed once before the daemon loop."""
self._added_ports = self._utils.get_vnic_ids()
self._create_event_listeners()
def _reclaim_local_network(self, net_uuid):
LOG.info("Reclaiming local network %s", net_uuid)
del self._network_vswitch_map[net_uuid]
def _port_bound(self, port_id, network_id, network_type, physical_network,
segmentation_id, port_security_enabled, set_port_sriov):
"""Bind the port to the recived network."""
LOG.debug("Binding port %s", port_id)
if network_id not in self._network_vswitch_map:
self._provision_network(
port_id, network_id, network_type,
physical_network, segmentation_id)
vswitch_map = self._network_vswitch_map[network_id]
vswitch_map['ports'].append(port_id)
LOG.debug("Trying to connect the current port to vswitch %r.",
vswitch_map['vswitch_name'])
self._utils.connect_vnic_to_vswitch(
vswitch_name=vswitch_map['vswitch_name'],
switch_port_name=port_id,
)
if set_port_sriov:
LOG.debug("Enabling SR-IOV for port: %s", port_id)
self._utils.set_vswitch_port_sriov(port_id, True)
def _port_unbound(self, port_id, vnic_deleted=False):
LOG.debug("Trying to unbind the port %r", port_id)
vswitch = self._get_network_vswitch_map_by_port_id(port_id)
net_uuid, vswitch_map = vswitch
if not net_uuid:
LOG.debug('Port %s was not found on this agent.', port_id)
return
LOG.debug("Unbinding port %s", port_id)
self._utils.remove_switch_port(port_id, vnic_deleted)
vswitch_map['ports'].remove(port_id)
if not vswitch_map['ports']:
self._reclaim_local_network(net_uuid)
def _process_added_port(self, device_details):
# NOTE(claudiub): A port requiring SR-IOV will specify a PCI slot.
set_port_sriov = 'pci_slot' in device_details.get('profile', {})
self._treat_vif_port(
port_id=device_details['port_id'],
network_id=device_details['network_id'],
network_type=device_details['network_type'],
physical_network=device_details['physical_network'],
segmentation_id=device_details['segmentation_id'],
admin_state_up=device_details['admin_state_up'],
port_security_enabled=device_details['port_security_enabled'],
set_port_sriov=set_port_sriov)
def process_added_port(self, device_details):
"""Process the new ports.
        Wraps _process_added_port and handles both the successful and the
        exception cases.
"""
device = device_details['device']
port_id = device_details['port_id']
reprocess = True
try:
self._process_added_port(device_details)
LOG.debug("Updating cached port %s status as UP.", port_id)
self._update_port_status_cache(device, device_bound=True)
LOG.info("Port %s processed.", port_id)
except os_win_exc.HyperVvNicNotFound:
LOG.debug('vNIC %s not found. This can happen if the VM was '
'destroyed.', port_id)
reprocess = False
except os_win_exc.HyperVPortNotFoundException:
LOG.debug('vSwitch port %s not found. This can happen if the VM '
'was destroyed.', port_id)
# NOTE(claudiub): just to be on the safe side, in case Hyper-V said
# that the port was added, but it hasn't really, we're leaving
# reprocess = True. If the VM / vNIC was removed, on the next
# reprocess, a HyperVvNicNotFound will be raised.
except Exception as ex:
# NOTE(claudiub): in case of a non-transient error, the port will
# be processed over and over again, and will not be reported as
# bound (e.g.: InvalidParameterValue when setting QoS), until the
# port is deleted. These issues have to be investigated and solved
LOG.exception("Exception encountered while processing "
"port %(port_id)s. Exception: %(ex)s",
dict(port_id=port_id, ex=ex))
else:
# no exception encountered, no need to reprocess.
reprocess = False
if reprocess:
            # Re-add the port as "added", so it can be reprocessed.
self._added_ports.add(device)
# Force cache refresh.
self._refresh_cache = True
return False
return True
def _treat_devices_added(self):
"""Process the new devices."""
try:
devices_details_list = self._plugin_rpc.get_devices_details_list(
self._context, self._added_ports, self._agent_id, self._host)
except Exception as exc:
LOG.debug("Unable to get ports details for "
"devices %(devices)s: %(exc)s",
{'devices': self._added_ports, 'exc': exc})
return
for device_details in devices_details_list:
device = device_details['device']
LOG.info("Adding port %s", device)
if 'port_id' in device_details:
LOG.info("Port %(device)s updated. "
"Details: %(device_details)s",
{'device': device, 'device_details': device_details})
eventlet.spawn_n(self.process_added_port, device_details)
else:
LOG.debug("Missing port_id from device details: "
"%(device)s. Details: %(device_details)s",
{'device': device, 'device_details': device_details})
LOG.debug("Remove the port from added ports set, so it "
"doesn't get reprocessed.")
self._added_ports.discard(device)
def _process_removed_port(self, device):
"""Process the removed ports."""
LOG.debug("Trying to remove the port %r", device)
self._update_port_status_cache(device, device_bound=False)
self._port_unbound(device, vnic_deleted=True)
LOG.debug("The port was successfully removed.")
self._removed_ports.discard(device)
def _treat_devices_removed(self):
"""Process the removed devices."""
for device in self._removed_ports.copy():
eventlet.spawn_n(self._process_removed_port, device)
@_synchronized('n-plugin-notifier')
def _notify_plugin_on_port_updates(self):
if not (self._bound_ports or self._unbound_ports):
return
with self._cache_lock:
bound_ports = self._bound_ports.copy()
unbound_ports = self._unbound_ports.copy()
self._plugin_rpc.update_device_list(
self._context, list(bound_ports), list(unbound_ports),
self._agent_id, self._host)
with self._cache_lock:
self._bound_ports = self._bound_ports.difference(bound_ports)
self._unbound_ports = self._unbound_ports.difference(
unbound_ports)
def _work(self):
"""Process the information regarding the available ports."""
if self._refresh_cache:
# Inconsistent cache might cause exceptions. For example,
# if a port has been removed, it will be known in the next
# loop. Using the old switch port can cause exceptions.
LOG.debug("Refreshing os_win caches...")
self._utils.update_cache()
self._refresh_cache = False
if self._bound_ports or self._unbound_ports:
eventlet.spawn_n(self._notify_plugin_on_port_updates)
# notify plugin about port deltas
if self._added_ports:
LOG.debug("Agent loop has new devices!")
self._treat_devices_added()
if self._removed_ports:
LOG.debug("Agent loop has lost devices...")
self._treat_devices_removed()
def port_update(self, context, port=None, network_type=None,
segmentation_id=None, physical_network=None):
LOG.debug("port_update received: %s", port['id'])
if self._utils.vnic_port_exists(port['id']):
self._treat_vif_port(
port_id=port['id'],
network_id=port['network_id'],
network_type=network_type,
physical_network=physical_network,
segmentation_id=segmentation_id,
admin_state_up=port['admin_state_up'],
port_security_enabled=port['port_security_enabled'],
)
else:
LOG.debug("No port %s defined on agent.", port['id'])
def port_delete(self, context, port_id=None):
"""Delete the received port."""
LOG.debug("port_delete event received for %r", port_id)
def network_delete(self, context, network_id=None):
LOG.debug("network_delete received. "
"Deleting network %s", network_id)
# The network may not be defined on this agent
if network_id in self._network_vswitch_map:
self._reclaim_local_network(network_id)
else:
LOG.debug("Network %s not defined on agent.", network_id)
@abc.abstractmethod
def _provision_network(self, port_id, net_uuid, network_type,
physical_network, segmentation_id):
"""Provision the network with the received information."""
pass
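    # A concrete agent must override _provision_network. An illustrative
    # (hypothetical) override, consistent with how _port_bound consumes
    # self._network_vswitch_map above, could be:
    #
    #   def _provision_network(self, port_id, net_uuid, network_type,
    #                          physical_network, segmentation_id):
    #       vswitch_name = self._get_vswitch_name(network_type, physical_network)
    #       self._network_vswitch_map[net_uuid] = {'vswitch_name': vswitch_name,
    #                                              'ports': []}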
@_port_synchronized
def _treat_vif_port(self, port_id, network_id, network_type,
physical_network, segmentation_id,
admin_state_up, port_security_enabled,
set_port_sriov=False):
if admin_state_up:
self._port_bound(port_id, network_id, network_type,
physical_network, segmentation_id,
port_security_enabled, set_port_sriov)
else:
self._port_unbound(port_id)
| 40.979253 | 79 | 0.638264 |
7946ab56412a91399c1b1bad47a07dc5c7f96a8f | 19,861 | py | Python | manila/tests/api/v1/test_security_service.py | kpawar89/manila | d487c2db728cedf8357b9f4acbc0a45c21c3a83e | [
"Apache-2.0"
] | 159 | 2015-01-02T09:35:15.000Z | 2022-01-04T11:51:34.000Z | manila/tests/api/v1/test_security_service.py | kpawar89/manila | d487c2db728cedf8357b9f4acbc0a45c21c3a83e | [
"Apache-2.0"
] | 5 | 2015-07-24T09:28:21.000Z | 2020-11-20T04:33:51.000Z | manila/tests/api/v1/test_security_service.py | kpawar89/manila | d487c2db728cedf8357b9f4acbc0a45c21c3a83e | [
"Apache-2.0"
] | 128 | 2015-01-05T22:52:28.000Z | 2021-12-29T14:00:58.000Z | # Copyright 2012 NetApp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from urllib import parse
import ddt
import webob
from manila.api.v1 import security_service
from manila.common import constants
from manila import db
from manila import exception
from manila import test
from manila.tests.api import fakes
@ddt.ddt
class ShareApiTest(test.TestCase):
"""Share Api Test."""
def setUp(self):
super(ShareApiTest, self).setUp()
self.controller = security_service.SecurityServiceController()
self.maxDiff = None
self.ss_active_directory = {
"created_at": "fake-time",
"updated_at": "fake-time-2",
"id": 1,
"name": "fake-name",
"description": "Fake Security Service Desc",
"type": constants.SECURITY_SERVICES_ALLOWED_TYPES[0],
"dns_ip": "1.1.1.1",
"server": "fake-server",
"domain": "fake-domain",
"user": "fake-user",
"password": "fake-password",
"status": constants.STATUS_NEW,
"project_id": "fake",
}
self.ss_ldap = {
"created_at": "fake-time",
"updated_at": "fake-time-2",
"id": 2,
"name": "ss-ldap",
"description": "Fake Security Service Desc",
"type": constants.SECURITY_SERVICES_ALLOWED_TYPES[1],
"dns_ip": "2.2.2.2",
"server": "test-server",
"domain": "test-domain",
"user": "test-user",
"password": "test-password",
"status": "active",
"project_id": "fake",
}
self.valid_search_opts = {
'user': 'fake-user',
'server': 'fake-server',
'dns_ip': '1.1.1.1',
'domain': 'fake-domain',
'type': constants.SECURITY_SERVICES_ALLOWED_TYPES[0],
}
self.check_policy_patcher = mock.patch(
'manila.api.v1.security_service.policy.check_policy')
self.check_policy_patcher.start()
self.addCleanup(self._stop_started_patcher, self.check_policy_patcher)
self.security_service_list_expected_resp = {
'security_services': [{
'id': self.ss_active_directory['id'],
'name': self.ss_active_directory['name'],
'type': self.ss_active_directory['type'],
'status': self.ss_active_directory['status']
}, ]
}
self.fake_share_network_list_with_share_servers = [{
'id': 'fake_sn_id',
'share_network_subnets': [{
'id': 'fake_sns_id',
'share_servers': [{'id': 'fake_ss_id'}]
}]
}]
self.fake_share_network_list_without_share_servers = [{
'id': 'fake_sn_id',
'share_network_subnets': [{
'id': 'fake_sns_id',
'share_servers': []
}]
}]
def _stop_started_patcher(self, patcher):
if hasattr(patcher, 'is_local'):
patcher.stop()
def test_security_service_show(self):
db.security_service_get = mock.Mock(
return_value=self.ss_active_directory)
req = fakes.HTTPRequest.blank('/security-services/1')
res_dict = self.controller.show(req, '1')
expected = self.ss_active_directory.copy()
expected.update()
self.assertEqual({'security_service': self.ss_active_directory},
res_dict)
def test_security_service_show_not_found(self):
db.security_service_get = mock.Mock(side_effect=exception.NotFound)
req = fakes.HTTPRequest.blank('/shares/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
req, '1')
def test_security_service_create(self):
sec_service = self.ss_active_directory.copy()
create_stub = mock.Mock(
return_value=sec_service)
self.mock_object(db, 'security_service_create', create_stub)
req = fakes.HTTPRequest.blank('/security-services')
res_dict = self.controller.create(
req, {"security_service": sec_service})
expected = self.ss_active_directory.copy()
self.assertEqual({'security_service': expected}, res_dict)
def test_security_service_create_invalid_types(self):
sec_service = self.ss_active_directory.copy()
sec_service['type'] = 'invalid'
req = fakes.HTTPRequest.blank('/security-services')
self.assertRaises(exception.InvalidInput, self.controller.create, req,
{"security_service": sec_service})
def test_create_security_service_no_body(self):
body = {}
req = fakes.HTTPRequest.blank('/security-services')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create,
req,
body)
def test_security_service_delete(self):
db.security_service_delete = mock.Mock()
db.security_service_get = mock.Mock()
db.share_network_get_all_by_security_service = mock.Mock(
return_value=[])
req = fakes.HTTPRequest.blank('/security_services/1')
resp = self.controller.delete(req, 1)
db.security_service_delete.assert_called_once_with(
req.environ['manila.context'], 1)
self.assertEqual(202, resp.status_int)
def test_security_service_delete_not_found(self):
db.security_service_get = mock.Mock(side_effect=exception.NotFound)
req = fakes.HTTPRequest.blank('/security_services/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete,
req,
1)
def test_security_service_delete_has_share_networks(self):
db.security_service_get = mock.Mock()
db.share_network_get_all_by_security_service = mock.Mock(
return_value=[{'share_network': 'fake_share_network'}])
req = fakes.HTTPRequest.blank('/security_services/1')
self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
req, 1)
def test_security_service_update_name(self):
new = self.ss_active_directory.copy()
updated = self.ss_active_directory.copy()
updated['name'] = 'new'
self.mock_object(security_service.policy, 'check_policy')
db.security_service_get = mock.Mock(return_value=new)
db.security_service_update = mock.Mock(return_value=updated)
fake_sns = {'id': 'fake_sns_id', 'share_servers': ['fake_ss']}
db.share_network_get_all_by_security_service = mock.Mock(
return_value=[{
'id': 'fake_id',
'share_network_subnets': [fake_sns]
}])
body = {"security_service": {"name": "new"}}
req = fakes.HTTPRequest.blank('/security_service/1')
res_dict = self.controller.update(req, 1, body)['security_service']
self.assertEqual(updated['name'], res_dict['name'])
db.share_network_get_all_by_security_service.assert_called_once_with(
req.environ['manila.context'], 1)
self.assertEqual(2, security_service.policy.check_policy.call_count)
security_service.policy.check_policy.assert_has_calls([
mock.call(req.environ['manila.context'],
security_service.RESOURCE_NAME, 'update', new)
])
def test_security_service_update_description(self):
new = self.ss_active_directory.copy()
updated = self.ss_active_directory.copy()
updated['description'] = 'new'
self.mock_object(security_service.policy, 'check_policy')
db.security_service_get = mock.Mock(return_value=new)
db.security_service_update = mock.Mock(return_value=updated)
fake_sns = {'id': 'fake_sns_id', 'share_servers': ['fake_ss']}
db.share_network_get_all_by_security_service = mock.Mock(
return_value=[{
'id': 'fake_id',
'share_network_subnets': [fake_sns]
}])
body = {"security_service": {"description": "new"}}
req = fakes.HTTPRequest.blank('/security_service/1')
res_dict = self.controller.update(req, 1, body)['security_service']
self.assertEqual(updated['description'], res_dict['description'])
db.share_network_get_all_by_security_service.assert_called_once_with(
req.environ['manila.context'], 1)
self.assertEqual(2, security_service.policy.check_policy.call_count)
security_service.policy.check_policy.assert_has_calls([
mock.call(req.environ['manila.context'],
security_service.RESOURCE_NAME, 'update', new)
])
@mock.patch.object(db, 'security_service_get', mock.Mock())
@mock.patch.object(db, 'share_network_get_all_by_security_service',
mock.Mock())
def test_security_service_update_invalid_keys_sh_server_exists(self):
self.mock_object(security_service.policy, 'check_policy')
fake_sns = {'id': 'fake_sns_id', 'share_servers': ['fake_ss']}
db.share_network_get_all_by_security_service.return_value = [
{'id': 'fake_id', 'share_network_subnets': [fake_sns]},
]
db.security_service_get.return_value = self.ss_active_directory.copy()
body = {'security_service': {'user_id': 'new_user'}}
req = fakes.HTTPRequest.blank('/security_services/1')
self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
req, 1, body)
db.security_service_get.assert_called_once_with(
req.environ['manila.context'], 1)
db.share_network_get_all_by_security_service.assert_called_once_with(
req.environ['manila.context'], 1)
self.assertEqual(1, security_service.policy.check_policy.call_count)
security_service.policy.check_policy.assert_has_calls([
mock.call(req.environ['manila.context'],
security_service.RESOURCE_NAME, 'update',
db.security_service_get.return_value)
])
@mock.patch.object(db, 'security_service_get', mock.Mock())
@mock.patch.object(db, 'security_service_update', mock.Mock())
@mock.patch.object(db, 'share_network_get_all_by_security_service',
mock.Mock())
def test_security_service_update_valid_keys_sh_server_exists(self):
self.mock_object(security_service.policy, 'check_policy')
fake_sns = {'id': 'fake_sns_id', 'share_servers': ['fake_ss']}
db.share_network_get_all_by_security_service.return_value = [
{'id': 'fake_id', 'share_network_subnets': [fake_sns]},
]
old = self.ss_active_directory.copy()
updated = self.ss_active_directory.copy()
updated['name'] = 'new name'
updated['description'] = 'new description'
db.security_service_get.return_value = old
db.security_service_update.return_value = updated
body = {
'security_service': {
'description': 'new description',
'name': 'new name',
},
}
req = fakes.HTTPRequest.blank('/security_services/1')
res_dict = self.controller.update(req, 1, body)['security_service']
self.assertEqual(updated['description'], res_dict['description'])
self.assertEqual(updated['name'], res_dict['name'])
db.security_service_get.assert_called_once_with(
req.environ['manila.context'], 1)
db.share_network_get_all_by_security_service.assert_called_once_with(
req.environ['manila.context'], 1)
db.security_service_update.assert_called_once_with(
req.environ['manila.context'], 1, body['security_service'])
self.assertEqual(2, security_service.policy.check_policy.call_count)
security_service.policy.check_policy.assert_has_calls([
mock.call(req.environ['manila.context'],
security_service.RESOURCE_NAME, 'update', old)
])
@mock.patch.object(db, 'security_service_get', mock.Mock())
def test_security_service_update_has_share_servers(self):
db.security_service_get = mock.Mock()
self.mock_object(
self.controller, '_share_servers_dependent_on_sn_exist',
mock.Mock(return_value=True))
body = {"security_service": {"type": "ldap"}}
req = fakes.HTTPRequest.blank('/security_services/1')
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update,
req,
1,
body)
@ddt.data(True, False)
def test_security_service_update_share_server_dependent_exists(self,
expected):
req = fakes.HTTPRequest.blank('/security_services/1')
context = req.environ['manila.context']
db.security_service_get = mock.Mock()
network = (self.fake_share_network_list_with_share_servers if expected
else self.fake_share_network_list_without_share_servers)
db.share_network_get_all_by_security_service = mock.Mock(
return_value=network)
result = self.controller._share_servers_dependent_on_sn_exist(
context, 'fake_id')
self.assertEqual(expected, result)
def test_security_service_list(self):
db.security_service_get_all_by_project = mock.Mock(
return_value=[self.ss_active_directory.copy()])
req = fakes.HTTPRequest.blank('/security_services')
res_dict = self.controller.index(req)
self.assertEqual(self.security_service_list_expected_resp, res_dict)
@mock.patch.object(db, 'share_network_get', mock.Mock())
def test_security_service_list_filter_by_sn(self):
sn = {
'id': 'fake_sn_id',
'security_services': [self.ss_active_directory, ],
}
db.share_network_get.return_value = sn
req = fakes.HTTPRequest.blank(
'/security-services?share_network_id=fake_sn_id')
res_dict = self.controller.index(req)
self.assertEqual(self.security_service_list_expected_resp, res_dict)
db.share_network_get.assert_called_once_with(
req.environ['manila.context'],
sn['id'])
@mock.patch.object(db, 'security_service_get_all', mock.Mock())
def test_security_services_list_all_tenants_admin_context(self):
self.check_policy_patcher.stop()
db.security_service_get_all.return_value = [
self.ss_active_directory,
self.ss_ldap,
]
req = fakes.HTTPRequest.blank(
'/security-services?all_tenants=1&name=fake-name',
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(self.security_service_list_expected_resp, res_dict)
db.security_service_get_all.assert_called_once_with(
req.environ['manila.context'])
@mock.patch.object(db, 'security_service_get_all_by_project', mock.Mock())
def test_security_services_list_all_tenants_non_admin_context(self):
db.security_service_get_all_by_project.return_value = []
req = fakes.HTTPRequest.blank(
'/security-services?all_tenants=1')
fake_context = req.environ['manila.context']
self.controller.index(req)
db.security_service_get_all_by_project.assert_called_once_with(
fake_context, fake_context.project_id
)
@mock.patch.object(db, 'security_service_get_all', mock.Mock())
def test_security_services_list_all_tenants_with_invalid_value(self):
req = fakes.HTTPRequest.blank(
'/security-services?all_tenants=nerd',
use_admin_context=True)
self.assertRaises(exception.InvalidInput, self.controller.index, req)
@mock.patch.object(db, 'security_service_get_all_by_project', mock.Mock())
def test_security_services_list_all_tenants_with_value_zero(self):
db.security_service_get_all_by_project.return_value = []
req = fakes.HTTPRequest.blank(
'/security-services?all_tenants=0',
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual({'security_services': []}, res_dict)
db.security_service_get_all_by_project.assert_called_once_with(
req.environ['manila.context'],
req.environ['manila.context'].project_id)
@mock.patch.object(db, 'security_service_get_all_by_project', mock.Mock())
def test_security_services_list_admin_context_invalid_opts(self):
db.security_service_get_all_by_project.return_value = [
self.ss_active_directory,
self.ss_ldap,
]
req = fakes.HTTPRequest.blank(
'/security-services?fake_opt=fake_value',
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual({'security_services': []}, res_dict)
db.security_service_get_all_by_project.assert_called_once_with(
req.environ['manila.context'],
req.environ['manila.context'].project_id)
@mock.patch.object(db, 'security_service_get_all_by_project', mock.Mock())
def test_security_service_list_all_filter_opts_separately(self):
db.security_service_get_all_by_project.return_value = [
self.ss_active_directory,
self.ss_ldap,
]
for opt, val in self.valid_search_opts.items():
for use_admin_context in [True, False]:
req = fakes.HTTPRequest.blank(
'/security-services?' + opt + '=' + val,
use_admin_context=use_admin_context)
res_dict = self.controller.index(req)
self.assertEqual(self.security_service_list_expected_resp,
res_dict)
db.security_service_get_all_by_project.assert_called_with(
req.environ['manila.context'],
req.environ['manila.context'].project_id)
@mock.patch.object(db, 'security_service_get_all_by_project', mock.Mock())
def test_security_service_list_all_filter_opts(self):
db.security_service_get_all_by_project.return_value = [
self.ss_active_directory,
self.ss_ldap,
]
query_string = '/security-services?' + parse.urlencode(sorted(
[(k, v) for (k, v) in list(self.valid_search_opts.items())]))
for use_admin_context in [True, False]:
req = fakes.HTTPRequest.blank(query_string,
use_admin_context=use_admin_context)
res_dict = self.controller.index(req)
self.assertEqual(self.security_service_list_expected_resp,
res_dict)
db.security_service_get_all_by_project.assert_called_with(
req.environ['manila.context'],
req.environ['manila.context'].project_id)
| 45.448513 | 78 | 0.636272 |
7946acc2388304fdfb6caf49e55542368561af96 | 245 | py | Python | 2_Baekjoon/7_practice.py | YOON-CC/Baekjoon | 608daac0b294f0273f706eff83d7abcb47815097 | [
"MIT"
] | null | null | null | 2_Baekjoon/7_practice.py | YOON-CC/Baekjoon | 608daac0b294f0273f706eff83d7abcb47815097 | [
"MIT"
] | null | null | null | 2_Baekjoon/7_practice.py | YOON-CC/Baekjoon | 608daac0b294f0273f706eff83d7abcb47815097 | [
"MIT"
] | null | null | null | # Baekjoon 1149
n = int(input())
a = [list(map(int, input().split())) for _ in range(n)]
# a[i][c]: minimum total cost of painting houses 0..i with house i painted color c
for i in range(1, n):
    a[i][0] += min(a[i - 1][1], a[i - 1][2])
    a[i][1] += min(a[i - 1][0], a[i - 1][2])
    a[i][2] += min(a[i - 1][0], a[i - 1][1])
# print(a)  # debug output of the full DP table; the judge expects only the answer
print(min(a[n - 1]))
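# Illustrative trace (example costs, not fixed input): with n = 3 and cost rows
# [26, 40, 83], [49, 60, 57], [13, 89, 99], the DP rows become
# [26, 40, 83] -> [89, 86, 83] -> [96, 172, 185], so the printed answer is 96.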
7946ad0c8b69f2fb349f003fa8f368c5ef072927 | 2,044 | py | Python | calibration/src/calibration/calibration/publish_camera_info.py | sieniven/detect-obstacles-ros | 2fe2e502781060da83e4840538c779808553dbfb | [
"Apache-2.0"
] | 6 | 2021-04-26T15:11:24.000Z | 2022-01-05T23:10:43.000Z | calibration/src/calibration/calibration/publish_camera_info.py | sieniven/detect-obstacles-ros | 2fe2e502781060da83e4840538c779808553dbfb | [
"Apache-2.0"
] | null | null | null | calibration/src/calibration/calibration/publish_camera_info.py | sieniven/detect-obstacles-ros | 2fe2e502781060da83e4840538c779808553dbfb | [
"Apache-2.0"
] | null | null | null | # basic Python frameworks
import argparse
import yaml
import numpy as np
# ROS2 libraries
import rclpy
# import messages
from sensor_msgs.msg import CameraInfo
def main():
"""
main function to publish camera info ros2 messages
"""
rclpy.init(args=None)
parser = argparse.ArgumentParser()
parser.add_argument('--topic-to-publish', action="store",
help="ROS topic to publish the camera info", required=True, type=str)
topic_to_publish = parser.parse_args().topic_to_publish
if "left" in topic_to_publish:
identifier = "left"
else:
identifier = "right"
node = rclpy.create_node('%s_camera_info_publisher' % identifier)
publisher = node.create_publisher(CameraInfo, topic_to_publish, 10)
msg = CameraInfo()
with open(r'/home/garuda/dev_ws/src/copilot_daa_calibration/data/intrinsic/camera_info.yaml') \
as config_file:
camera_info_file_path = yaml.load(config_file, Loader=yaml.FullLoader)["config"]["intrinsic"]
with open(camera_info_file_path, 'r') as camera_info_file:
camera_info = yaml.load(camera_info_file, Loader=yaml.FullLoader)
msg.height = camera_info["image"]["height"]
msg.width = camera_info["image"]["width"]
    msg.distortion_model = "plumb_bob"  # underscore form expected by sensor_msgs consumers
msg.d = camera_info["distortion"][identifier][0]
msg.k = np.array(camera_info["intrinsic"][identifier]).flatten().tolist()
msg.r = np.array(camera_info["rectification"][identifier]).flatten().tolist()
msg.p = np.array(camera_info["projection_mtx"][identifier]).flatten().tolist()
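    # The intrinsics YAML is assumed to follow roughly this layout (illustrative
    # keys/values only; the real file comes from the calibration step):
    #
    #   image: {height: 720, width: 1280}
    #   distortion: {left: [[k1, k2, p1, p2, k3]], right: [[...]]}
    #   intrinsic: {left: [[fx, 0, cx], [0, fy, cy], [0, 0, 1]], right: [[...]]}
    #   rectification: {left: 3x3 matrix, right: 3x3 matrix}
    #   projection_mtx: {left: 3x4 matrix, right: 3x4 matrix}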
frame_id = 1
def timer_callback():
nonlocal frame_id
msg.header.stamp = rclpy.clock.Clock().now().to_msg()
msg.header.frame_id = str(frame_id)
publisher.publish(msg)
frame_id += 1
timer_period = 0.5
timer = node.create_timer(timer_period, timer_callback)
rclpy.spin(node)
node.destroy_timer(timer)
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
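# Example invocation (topic name is illustrative; a topic containing 'left'
# selects the left camera, anything else is treated as the right camera):
#   python3 publish_camera_info.py --topic-to-publish /camera/left/camera_info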
| 31.9375 | 101 | 0.683953 |
7946ad790478927b894a9cfe94d86022ebe4a01e | 7,946 | py | Python | base_python/cogment_verse/model_registry_client.py | kharyal/cogment-verse | 12bcb855bc742e3ec4ed11c40a1b475e95a32515 | [
"Apache-2.0"
] | null | null | null | base_python/cogment_verse/model_registry_client.py | kharyal/cogment-verse | 12bcb855bc742e3ec4ed11c40a1b475e95a32515 | [
"Apache-2.0"
] | null | null | null | base_python/cogment_verse/model_registry_client.py | kharyal/cogment-verse | 12bcb855bc742e3ec4ed11c40a1b475e95a32515 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 AI Redefined Inc. <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import io
import logging
import time
import grpc.aio
from google.protobuf.json_format import MessageToDict
from cogment.api.model_registry_pb2 import (
CreateOrUpdateModelRequest,
CreateVersionRequestChunk,
ModelInfo,
ModelVersionInfo,
RetrieveModelsRequest,
RetrieveVersionDataRequest,
RetrieveVersionInfosRequest,
)
from cogment.api.model_registry_pb2_grpc import ModelRegistrySPStub
from cogment_verse.utils import LRU
from prometheus_client import Summary
MODEL_REGISTRY_PUBLISH_VERSION_TIME = Summary(
"model_registry_publish_version_seconds",
"Time spent serializing and sending the model to the registry",
["model_id"],
)
MODEL_REGISTRY_RETRIEVE_VERSION_TIME = Summary(
"model_registry_retrieve_version_seconds",
"Time spent retrieving and deserializing the agent model version from the registry",
["model_id", "cached"],
)
log = logging.getLogger(__name__)
class ModelRegistryClient:
def __init__(self, endpoint, cache=LRU()):
channel = grpc.aio.insecure_channel(endpoint)
self._stub = ModelRegistrySPStub(channel)
self._cache = cache
@staticmethod
def _build_model_version_data_cache_key(data_hash):
return f"model_version_data_{data_hash}"
@staticmethod
def _build_model_info_cache_key(model_id):
return f"model_info_{model_id}"
async def create_model(self, model_id, model_user_data=None):
"""
Create a new model in the model registry
Parameters:
model_id (string): The model id
model_user_data (dict[str, str] - optional): model user data
"""
model_user_data_str = {}
if model_user_data:
for key, value in model_user_data.items():
model_user_data_str[key] = str(value)
model_info = ModelInfo(model_id=model_id, user_data=model_user_data_str)
req = CreateOrUpdateModelRequest(model_info=model_info)
await self._stub.CreateOrUpdateModel(req)
self._cache[self._build_model_info_cache_key(model_id)] = model_info
async def retrieve_model_info(self, model_id):
"""
Retrieve the given's model information
Parameters:
model_id (string): The model id
Returns
model_info (dict): The information of the model
"""
cache_key = self._build_model_info_cache_key(model_id)
if cache_key not in self._cache:
req = RetrieveModelsRequest(model_ids=[model_id])
rep = await self._stub.RetrieveModels(req)
model_info = rep.model_infos[0]
self._cache[cache_key] = model_info
return MessageToDict(self._cache[cache_key], preserving_proto_field_name=True)
async def publish_version(self, model_id, model, save_model, archived=False, **kwargs):
"""
Publish a new version of the model
Parameters:
model_id (string): Unique id of the model
model (ModelT): The model
save_model (f(ModelT, dict[str, str], BinaryIO, **kwargs) -> dict[str, str]): A function able to save the model, returning version_user_data
            archived (bool - default is False): If true, the model version will be archived (i.e. stored in permanent storage)
kwargs: any number of key/values parameters, forwarded to `save_model`
Returns
version_info (dict): The information of the published version
"""
model_info = await self.retrieve_model_info(model_id)
model_user_data = model_info["user_data"]
def generate_chunks():
try:
with io.BytesIO() as model_data_io:
version_user_data = save_model(model, model_user_data, model_data_io, **kwargs)
version_data = model_data_io.getvalue()
version_info = ModelVersionInfo(model_id=model_id, archived=archived, data_size=len(version_data))
for key, value in version_user_data.items():
version_info.user_data[key] = str(value)
yield CreateVersionRequestChunk(header=CreateVersionRequestChunk.Header(version_info=version_info))
                    chunksize = 2 * 1024 * 1024  # 2 MB chunks to stay well under the gRPC 4 MB message limit
while version_data:
yield CreateVersionRequestChunk(
body=CreateVersionRequestChunk.Body(data_chunk=version_data[:chunksize])
)
version_data = version_data[chunksize:]
except Exception as error:
log.error("Error while generating model version chunk", exc_info=error)
raise error
with MODEL_REGISTRY_PUBLISH_VERSION_TIME.labels(model_id=model_id).time():
rep = await self._stub.CreateVersion(generate_chunks())
cache_key = self._build_model_version_data_cache_key(rep.version_info.data_hash)
self._cache[cache_key] = model
return MessageToDict(rep.version_info, preserving_proto_field_name=True)
async def retrieve_version(self, model_id, load_model, version_number=-1, **kwargs):
"""
Retrieve a version of the model
Parameters:
model_id (string): Unique id of the model
load_model (f(string, int, dict[str, str], dict[str, str], BinaryIO)): A function able to load the model
version_number (int - default is -1): The version number (-1 for the latest)
kwargs: any number of key/values parameters, forwarded to `load_model`
Returns
model, model_info, version_info (ModelT, dict[str, str], dict[str, str]): A tuple containing the model version data, the model info and the model version info
"""
start_time = time.time()
# First retrieve the model info and model version info
async def retrieve_version_info(model_id, version_number):
req = RetrieveVersionInfosRequest(model_id=model_id, version_numbers=[version_number])
rep = await self._stub.RetrieveVersionInfos(req)
version_info_pb = rep.version_infos[0]
version_info = MessageToDict(version_info_pb, preserving_proto_field_name=True)
return version_info
[model_info, version_info] = await asyncio.gather(
self.retrieve_model_info(model_id), retrieve_version_info(model_id, version_number)
)
cache_key = self._build_model_version_data_cache_key(version_info["data_hash"])
cached = cache_key in self._cache
# Check if the model version data is already in memory, if not retrieve
if not cached:
req = RetrieveVersionDataRequest(model_id=model_id, version_number=version_info["version_number"])
data = b""
async for chunk in self._stub.RetrieveVersionData(req):
data += chunk.data_chunk
model = load_model(
model_id, version_number, model_info["user_data"], version_info["user_data"], io.BytesIO(data), **kwargs
)
self._cache[cache_key] = model
MODEL_REGISTRY_RETRIEVE_VERSION_TIME.labels(model_id=model_id, cached=cached).observe(time.time() - start_time)
return self._cache[cache_key], model_info, version_info
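# Illustrative usage sketch (endpoint, model id and the save/load callbacks are
# hypothetical; see the method docstrings above for the expected signatures):
#
#   registry = ModelRegistryClient("localhost:9002")
#   await registry.create_model("my_model", {"framework": "torch"})
#   version_info = await registry.publish_version("my_model", model, save_model_cb)
#   model, info, version = await registry.retrieve_version("my_model", load_model_cb)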
| 40.131313 | 170 | 0.679713 |
7946ae510ee16ccd1a48c745d68e8645cabcce00 | 23,939 | py | Python | bauh/gems/web/environment.py | Flash1232/bauh | 6f65556c05ae272c1dbbd557c7f80a606658eb56 | [
"Zlib"
] | 507 | 2019-08-12T16:15:55.000Z | 2022-03-28T15:49:39.000Z | bauh/gems/web/environment.py | Flash1232/bauh | 6f65556c05ae272c1dbbd557c7f80a606658eb56 | [
"Zlib"
] | 176 | 2019-08-14T02:35:21.000Z | 2022-03-31T21:43:56.000Z | bauh/gems/web/environment.py | Flash1232/bauh | 6f65556c05ae272c1dbbd557c7f80a606658eb56 | [
"Zlib"
] | 57 | 2019-09-02T04:09:22.000Z | 2022-03-21T21:37:16.000Z | import glob
import logging
import os
import shutil
import tarfile
import traceback
from datetime import datetime, timedelta
from pathlib import Path
from threading import Thread
from typing import Dict, List, Optional
import requests
import yaml
from bauh.api.abstract.download import FileDownloader
from bauh.api.abstract.handler import ProcessWatcher, TaskManager
from bauh.api.abstract.view import MessageType
from bauh.api.http import HttpClient
from bauh.commons import system
from bauh.commons.html import bold
from bauh.commons.system import SimpleProcess, ProcessHandler
from bauh.gems.web import ENV_PATH, NODE_DIR_PATH, NODE_BIN_PATH, NODE_MODULES_PATH, NATIVEFIER_BIN_PATH, \
ELECTRON_PATH, ELECTRON_DOWNLOAD_URL, ELECTRON_SHA256_URL, URL_ENVIRONMENT_SETTINGS, NPM_BIN_PATH, NODE_PATHS, \
nativefier, ELECTRON_WIDEVINE_URL, ELECTRON_WIDEVINE_SHA256_URL, \
ENVIRONMENT_SETTINGS_CACHED_FILE, ENVIRONMENT_SETTINGS_TS_FILE, get_icon_path, NATIVEFIER_BASE_URL
from bauh.gems.web.model import WebApplication
from bauh.view.util.translation import I18n
class EnvironmentComponent:
def __init__(self, id: str, name: str, size: str, version: str, url: str, update: bool = False, properties: Optional[dict] = None):
self.id = id
self.name = name
self.size = size
self.version = version
self.url = url
self.update = update
self.properties = properties
class EnvironmentUpdater:
def __init__(self, logger: logging.Logger, http_client: HttpClient, file_downloader: FileDownloader, i18n: I18n, taskman: Optional[TaskManager] = None):
self.logger = logger
self.file_downloader = file_downloader
self.i18n = i18n
self.http_client = http_client
self.task_read_settings_id = 'web_read_settings'
self.taskman = taskman
def _download_and_install(self, version: str, version_url: str, watcher: ProcessWatcher) -> bool:
self.logger.info("Downloading NodeJS {}: {}".format(version, version_url))
tarf_path = '{}/{}'.format(ENV_PATH, version_url.split('/')[-1])
downloaded = self.file_downloader.download(version_url, watcher=watcher, output_path=tarf_path, cwd=ENV_PATH)
if not downloaded:
self.logger.error("Could not download '{}'. Aborting...".format(version_url))
return False
else:
try:
tf = tarfile.open(tarf_path)
tf.extractall(path=ENV_PATH)
extracted_file = '{}/{}'.format(ENV_PATH, tf.getnames()[0])
if os.path.exists(NODE_DIR_PATH):
self.logger.info("Removing old NodeJS version installation dir -> {}".format(NODE_DIR_PATH))
try:
shutil.rmtree(NODE_DIR_PATH)
except:
self.logger.error("Could not delete old NodeJS version dir -> {}".format(NODE_DIR_PATH))
traceback.print_exc()
return False
try:
os.rename(extracted_file, NODE_DIR_PATH)
except:
self.logger.error("Could not rename the NodeJS version file {} as {}".format(extracted_file, NODE_DIR_PATH))
traceback.print_exc()
return False
if os.path.exists(NODE_MODULES_PATH):
self.logger.info('Deleting {}'.format(NODE_MODULES_PATH))
try:
shutil.rmtree(NODE_MODULES_PATH)
except:
self.logger.error("Could not delete the directory {}".format(NODE_MODULES_PATH))
return False
return True
except:
self.logger.error('Could not extract {}'.format(tarf_path))
traceback.print_exc()
return False
finally:
if os.path.exists(tarf_path):
try:
os.remove(tarf_path)
except:
self.logger.error('Could not delete file {}'.format(tarf_path))
def check_node_installed(self, version: str) -> bool:
if not os.path.exists(NODE_DIR_PATH):
return False
else:
installed_version = system.run_cmd('{} --version'.format(NODE_BIN_PATH), print_error=False)
if installed_version:
installed_version = installed_version.strip()
if installed_version.startswith('v'):
installed_version = installed_version[1:]
self.logger.info('Node versions: installed ({}), cloud ({})'.format(installed_version, version))
if version != installed_version:
self.logger.info("The NodeJs installed version is different from the Cloud.")
return False
else:
self.logger.info("Node is already up to date")
return True
else:
self.logger.warning("Could not determine the current NodeJS installed version")
return False
def update_node(self, version: str, version_url: str, watcher: ProcessWatcher = None) -> bool:
Path(ENV_PATH).mkdir(parents=True, exist_ok=True)
if not os.path.exists(NODE_DIR_PATH):
return self._download_and_install(version=version, version_url=version_url, watcher=watcher)
else:
installed_version = system.run_cmd('{} --version'.format(NODE_BIN_PATH), print_error=False)
if installed_version:
installed_version = installed_version.strip()
if installed_version.startswith('v'):
installed_version = installed_version[1:]
self.logger.info('Node versions: installed ({}), cloud ({})'.format(installed_version, version))
if version != installed_version:
self.logger.info("The NodeJs installed version is different from the Cloud.")
return self._download_and_install(version=version, version_url=version_url, watcher=watcher)
else:
self.logger.info("Node is already up to date")
return True
else:
self.logger.warning("Could not determine the current NodeJS installed version")
self.logger.info("Removing {}".format(NODE_DIR_PATH))
try:
shutil.rmtree(NODE_DIR_PATH)
return self._download_and_install(version=version, version_url=version_url, watcher=watcher)
except:
self.logger.error('Could not delete the dir {}'.format(NODE_DIR_PATH))
return False
def _install_node_lib(self, name: str, version: str, handler: ProcessHandler):
lib_repr = '{}{}'.format(name, '@{}'.format(version) if version else '')
self.logger.info("Installing {}".format(lib_repr))
if handler and handler.watcher:
handler.watcher.change_substatus(self.i18n['web.environment.install'].format(bold(lib_repr)))
proc = SimpleProcess([NPM_BIN_PATH, 'install', lib_repr], cwd=ENV_PATH, extra_paths=NODE_PATHS)
installed = handler.handle_simple(proc)[0]
if installed:
self.logger.info("{} successfully installed".format(lib_repr))
return installed
def _install_nativefier(self, version: str, url: str, handler: ProcessHandler) -> bool:
self.logger.info("Checking if nativefier@{} exists".format(version))
if not url or not self.http_client.exists(url):
self.logger.warning("The file {} seems not to exist".format(url))
handler.watcher.show_message(title=self.i18n['message.file.not_exist'],
body=self.i18n['message.file.not_exist.body'].format(bold(url)),
type_=MessageType.ERROR)
return False
success = self._install_node_lib('nativefier', version, handler)
if success:
return self._is_nativefier_installed()
def _is_nativefier_installed(self) -> bool:
return os.path.exists(NATIVEFIER_BIN_PATH)
def download_electron(self, version: str, url: str, widevine: bool, watcher: ProcessWatcher) -> bool:
Path(ELECTRON_PATH).mkdir(parents=True, exist_ok=True)
self.logger.info("Downloading Electron {}".format(version))
electron_path = self._get_electron_file_path(url=url, relative=False)
if not self.http_client.exists(url):
self.logger.warning("The file {} seems not to exist".format(url))
watcher.show_message(title=self.i18n['message.file.not_exist'],
body=self.i18n['message.file.not_exist.body'].format(bold(url)),
type_=MessageType.ERROR)
return False
return self.file_downloader.download(file_url=url, watcher=watcher, output_path=electron_path, cwd=ELECTRON_PATH)
def download_electron_sha256(self, version: str, url: str, widevine: bool, watcher: ProcessWatcher) -> bool:
self.logger.info("Downloading Electron {} sha526".format(version))
sha256_path = self._get_electron_file_path(url=url, relative=False)
if not self.http_client.exists(url):
self.logger.warning("The file {} seems not to exist".format(url))
watcher.show_message(title=self.i18n['message.file.not_exist'],
body=self.i18n['message.file.not_exist.body'].format(bold(url)),
type_=MessageType.ERROR)
return False
return self.file_downloader.download(file_url=url, watcher=watcher, output_path=sha256_path, cwd=ELECTRON_PATH)
def _get_electron_url(self, version: str, is_x86_x64_arch: bool, widevine: bool) -> str:
arch = 'x64' if is_x86_x64_arch else 'ia32'
if widevine:
return ELECTRON_WIDEVINE_URL.format(version=version, arch=arch)
else:
return ELECTRON_DOWNLOAD_URL.format(version=version, arch=arch)
def _get_electron_sha256_url(self, version: str, widevine: bool) -> str:
if widevine:
return ELECTRON_WIDEVINE_SHA256_URL.format(version=version)
else:
return ELECTRON_SHA256_URL.format(version=version)
def _get_electron_file_path(self, url: str, relative: bool) -> str:
file_path = url.replace(':', '').replace('/', '') + '/' + url.split('/')[-1]
return '{}/{}'.format(ELECTRON_PATH, file_path) if not relative else file_path
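    # Illustrative example (hypothetical URL): for
    # 'https://host.tld/electron/electron-v9.4.4-linux-x64.zip' the relative path is
    # 'httpshost.tldelectronelectron-v9.4.4-linux-x64.zip/electron-v9.4.4-linux-x64.zip',
    # i.e. the sanitized URL becomes the directory and the file name is the last URL segment.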
def check_electron_installed(self, version: str, is_x86_x64_arch: bool, widevine: bool) -> Dict[str, bool]:
self.logger.info("Checking if Electron {} (widevine={}) is installed".format(version, widevine))
res = {'electron': False, 'sha256': False}
if not os.path.exists(ELECTRON_PATH):
self.logger.info("The Electron folder {} was not found".format(ELECTRON_PATH))
else:
files = {f.split(ELECTRON_PATH + '/')[1] for f in glob.glob(ELECTRON_PATH + '/**', recursive=True) if os.path.isfile(f)}
if files:
electron_url = self._get_electron_url(version, is_x86_x64_arch, widevine)
file_path = self._get_electron_file_path(url=electron_url, relative=True)
res['electron'] = file_path in files
if not res['electron']:
res['sha256'] = True
else:
sha_url = self._get_electron_sha256_url(version=version, widevine=widevine)
sha_path = self._get_electron_file_path(url=sha_url, relative=True)
res['sha256'] = sha_path in files
else:
self.logger.info('No Electron file found in {}'.format(ELECTRON_PATH))
for att in ('electron', 'sha256'):
if res[att]:
self.logger.info('{} ({}) already downloaded'.format(att, version))
return res
def _finish_task_download_settings(self):
if self.taskman:
self.taskman.update_progress(self.task_read_settings_id, 100, None)
self.taskman.finish_task(self.task_read_settings_id)
def should_download_settings(self, web_config: dict) -> bool:
try:
settings_exp = int(web_config['environment']['cache_exp'])
except ValueError:
self.logger.error("Could not parse settings property 'environment.cache_exp': {}".format(web_config['environment']['cache_exp']))
return True
if settings_exp <= 0:
self.logger.info("No expiration time configured for the environment settings cache file.")
return True
self.logger.info("Checking cached environment settings file")
if not os.path.exists(ENVIRONMENT_SETTINGS_CACHED_FILE):
self.logger.warning("Environment settings file not cached.")
return True
if not os.path.exists(ENVIRONMENT_SETTINGS_TS_FILE):
self.logger.warning("Environment settings file has no timestamp associated with it.")
return True
with open(ENVIRONMENT_SETTINGS_TS_FILE) as f:
env_ts_str = f.read()
try:
env_timestamp = datetime.fromtimestamp(float(env_ts_str))
except:
self.logger.error("Could not parse environment settings file timestamp: {}".format(env_ts_str))
return True
expired = env_timestamp + timedelta(hours=settings_exp) <= datetime.utcnow()
if expired:
self.logger.info("Environment settings file has expired. It should be re-downloaded")
return True
else:
self.logger.info("Cached environment settings file is up to date")
return False
def read_cached_settings(self, web_config: dict) -> Optional[dict]:
if not self.should_download_settings(web_config):
with open(ENVIRONMENT_SETTINGS_CACHED_FILE) as f:
cached_settings_str = f.read()
try:
return yaml.safe_load(cached_settings_str)
except yaml.YAMLError:
self.logger.error('Could not parse the cache environment settings file: {}'.format(cached_settings_str))
def read_settings(self, web_config: dict, cache: bool = True) -> Optional[dict]:
if self.taskman:
self.taskman.register_task(self.task_read_settings_id, self.i18n['web.task.download_settings'], get_icon_path())
self.taskman.update_progress(self.task_read_settings_id, 1, None)
cached_settings = self.read_cached_settings(web_config) if cache else None
if cached_settings:
return cached_settings
try:
if self.taskman:
self.taskman.update_progress(self.task_read_settings_id, 10, None)
self.logger.info("Downloading environment settings")
res = self.http_client.get(URL_ENVIRONMENT_SETTINGS)
if not res:
self.logger.warning('Could not retrieve the environments settings from the cloud')
self._finish_task_download_settings()
return
try:
settings = yaml.safe_load(res.content)
except yaml.YAMLError:
self.logger.error('Could not parse environment settings: {}'.format(res.text))
self._finish_task_download_settings()
return
self.logger.info("Caching environment settings to disk")
cache_dir = os.path.dirname(ENVIRONMENT_SETTINGS_CACHED_FILE)
try:
Path(cache_dir).mkdir(parents=True, exist_ok=True)
except OSError:
self.logger.error("Could not create Web cache directory: {}".format(cache_dir))
self.logger.info('Finished')
self._finish_task_download_settings()
return
cache_timestamp = datetime.utcnow().timestamp()
with open(ENVIRONMENT_SETTINGS_CACHED_FILE, 'w+') as f:
f.write(yaml.safe_dump(settings))
with open(ENVIRONMENT_SETTINGS_TS_FILE, 'w+') as f:
f.write(str(cache_timestamp))
self._finish_task_download_settings()
self.logger.info("Finished")
return settings
except requests.exceptions.ConnectionError:
self._finish_task_download_settings()
return
def _check_and_fill_electron(self, pkg: WebApplication, env: dict, local_config: dict, x86_x64: bool, widevine: bool, output: List[EnvironmentComponent]):
electron_version = env['electron-wvvmp' if widevine else 'electron']['version']
if not widevine and pkg.version and pkg.version != electron_version: # this feature does not support custom widevine electron at the moment
self.logger.info('A preset Electron version is defined for {}: {}'.format(pkg.url, pkg.version))
electron_version = pkg.version
if not widevine and local_config['environment']['electron']['version']:
self.logger.warning("A custom Electron version will be used {} to install {}".format(electron_version, pkg.url))
electron_version = local_config['environment']['electron']['version']
electron_status = self.check_electron_installed(version=electron_version, is_x86_x64_arch=x86_x64, widevine=widevine)
electron_url = self._get_electron_url(version=electron_version, is_x86_x64_arch=x86_x64, widevine=widevine)
output.append(EnvironmentComponent(name=electron_url.split('/')[-1],
version=electron_version,
url=electron_url,
size=self.http_client.get_content_length(electron_url),
id='electron',
update=not electron_status['electron'],
properties={'widevine': widevine}))
sha_url = self._get_electron_sha256_url(version=electron_version, widevine=widevine)
output.append(EnvironmentComponent(name=sha_url.split('/')[-1],
version=electron_version,
url=sha_url,
size=self.http_client.get_content_length(sha_url),
id='electron_sha256',
update=not electron_status['electron'] or not electron_status['sha256'],
properties={'widevine': widevine}))
def _check_and_fill_node(self, env: dict, output: List[EnvironmentComponent]):
node = EnvironmentComponent(name=env['nodejs']['url'].split('/')[-1],
url=env['nodejs']['url'],
size=self.http_client.get_content_length(env['nodejs']['url']),
version=env['nodejs']['version'],
id='nodejs')
output.append(node)
native = self._map_nativefier_file(env['nativefier'])
output.append(native)
if not self.check_node_installed(env['nodejs']['version']):
node.update, native.update = True, True
else:
if not self._check_nativefier_installed(env['nativefier']):
native.update = True
def _check_nativefier_installed(self, nativefier_settings: dict) -> bool:
if not os.path.exists(NODE_MODULES_PATH):
self.logger.info('Node modules path {} not found'.format(NODE_MODULES_PATH))
return False
else:
if not self._is_nativefier_installed():
return False
installed_version = nativefier.get_version()
if installed_version:
installed_version = installed_version.strip()
self.logger.info("Nativefier versions: installed ({}), cloud ({})".format(installed_version, nativefier_settings['version']))
if nativefier_settings['version'] != installed_version:
self.logger.info("Installed nativefier version is different from cloud's. Changing version.")
return False
self.logger.info("Nativefier is already installed and up to date")
return True
def _map_nativefier_file(self, nativefier_settings: dict) -> EnvironmentComponent:
base_url = nativefier_settings.get('url')
if not base_url:
self.logger.warning("'url' not found in nativefier environment settings. Using hardcoded URL '{}'".format(NATIVEFIER_BASE_URL))
base_url = NATIVEFIER_BASE_URL
url = base_url.format(version=nativefier_settings['version'])
return EnvironmentComponent(name='nativefier@{}'.format(nativefier_settings['version']),
url=url,
size=self.http_client.get_content_length(url),
version=nativefier_settings['version'],
id='nativefier')
def check_environment(self, env: dict, local_config: dict, app: WebApplication,
is_x86_x64_arch: bool, widevine: bool) -> List[EnvironmentComponent]:
"""
:param app:
:param is_x86_x64_arch:
:return: the environment settings
"""
components, check_threads = [], []
system_env = local_config['environment'].get('system', False)
if system_env:
self.logger.warning("Using system's nativefier to install {}".format(app.url))
else:
node_check = Thread(target=self._check_and_fill_node, args=(env, components))
node_check.start()
check_threads.append(node_check)
elec_check = Thread(target=self._check_and_fill_electron, args=(app, env, local_config, is_x86_x64_arch, widevine, components))
elec_check.start()
check_threads.append(elec_check)
for t in check_threads:
t.join()
return components
def update(self, components: List[EnvironmentComponent], handler: ProcessHandler) -> bool:
self.logger.info('Updating environment')
Path(ENV_PATH).mkdir(parents=True, exist_ok=True)
comp_map = {c.id: c for c in components}
node_data = comp_map.get('nodejs')
nativefier_data = comp_map.get('nativefier')
if node_data:
if not self._download_and_install(version=node_data.version, version_url=node_data.url, watcher=handler.watcher):
return False
if not self._install_nativefier(version=nativefier_data.version, url=nativefier_data.url, handler=handler):
return False
else:
if nativefier_data and not self._install_nativefier(version=nativefier_data.version, url=nativefier_data.url, handler=handler):
return False
electron_data = comp_map.get('electron')
if electron_data:
if not self.download_electron(version=electron_data.version, url=electron_data.url, watcher=handler.watcher, widevine=electron_data.properties['widevine']):
return False
sha256_data = comp_map.get('electron_sha256')
if sha256_data:
if not self.download_electron_sha256(version=sha256_data.version, url=sha256_data.url, watcher=handler.watcher, widevine=sha256_data.properties['widevine']):
return False
self.logger.info('Environment successfully updated')
return True
| 45.772467 | 169 | 0.619073 |
7946b16ca90dfcc3d54ff43875533eb7a7f8a9b4 | 23,198 | py | Python | graspologic/match/qap.py | kellymarchisio/graspologic | 9aaca9067142c2e83d4cbc6bfa41738e64b3c066 | [
"MIT"
] | null | null | null | graspologic/match/qap.py | kellymarchisio/graspologic | 9aaca9067142c2e83d4cbc6bfa41738e64b3c066 | [
"MIT"
] | null | null | null | graspologic/match/qap.py | kellymarchisio/graspologic | 9aaca9067142c2e83d4cbc6bfa41738e64b3c066 | [
"MIT"
] | null | null | null | # adapted from scipy.optimze.quadratic_assignment()
# will live here temporalily until this function is officially released
# original code can be found here
# https://github.com/scipy/scipy/blob/master/scipy/optimize/_qap.py
import operator
import numpy as np
from scipy._lib._util import check_random_state
from scipy.optimize import OptimizeResult, linear_sum_assignment
def quadratic_assignment(A, B, method="faq", options=None):
r"""
Approximates solution to the quadratic assignment problem and
the graph matching problem.
Quadratic assignment solves problems of the following form:
.. math::
\min_P & \ {\ \text{trace}(A^T P B P^T)}\\
\mbox{s.t. } & {P \ \epsilon \ \mathcal{P}}\\
where :math:`\mathcal{P}` is the set of all permutation matrices,
and :math:`A` and :math:`B` are square matrices.
Graph matching tries to *maximize* the same objective function.
This algorithm can be thought of as finding the alignment of the
nodes of two graphs that minimizes the number of induced edge
disagreements, or, in the case of weighted graphs, the sum of squared
edge weight differences.
Note that the quadratic assignment problem is NP-hard, is not
known to be solvable in polynomial time, and is computationally
intractable. Therefore, the results given are approximations,
not guaranteed to be exact solutions.
Parameters
----------
A : 2d-array, square
The square matrix :math:`A` in the objective function above.
B : 2d-array, square
The square matrix :math:`B` in the objective function above.
method : str in {'faq', '2opt'} (default: 'faq')
The algorithm used to solve the problem.
:ref:`'faq' <optimize.qap-faq>` (default) and
:ref:`'2opt' <optimize.qap-2opt>` are available.
options : dict, optional
A dictionary of solver options. All solvers support the following:
partial_match : 2d-array of integers, optional, (default = None)
Allows the user to fix part of the matching between the two
matrices. In the literature, a partial match is also
known as a "seed" [2]_.
Each row of `partial_match` specifies the indices of a pair of
corresponding nodes, that is, node ``partial_match[i, 0]`` of `A`
is matched to node ``partial_match[i, 1]`` of `B`. Accordingly,
``partial_match`` is an array of size ``(m , 2)``, where ``m`` is
not greater than the number of nodes.
maximize : bool (default = False)
Setting `maximize` to ``True`` solves the Graph Matching Problem
            (GMP) rather than the Quadratic Assignment Problem (QAP).
rng : {None, int, `~np.random.RandomState`, `~np.random.Generator`}
This parameter defines the object to use for drawing random
variates.
If `rng` is ``None`` the `~np.random.RandomState` singleton is
used.
If `rng` is an int, a new ``RandomState`` instance is used,
seeded with `rng`.
If `rng` is already a ``RandomState`` or ``Generator``
instance, then that object is used.
Default is None.
For method-specific options, see
:func:`show_options('quadratic_assignment') <show_options>`.
Returns
-------
res : OptimizeResult
A :class:`scipy.optimize.OptimizeResult` containing the following
fields.
col_ind : 1-D array
An array of column indices corresponding with the best
permutation of the nodes of `B` found.
fun : float
The corresponding value of the objective function.
nit : int
The number of iterations performed during optimization.
Notes
-----
The default method :ref:`'faq' <optimize.qap-faq>` uses the Fast
    Approximate QAP algorithm [1]_; it typically offers the best
combination of speed and accuracy.
Method :ref:`'2opt' <optimize.qap-2opt>` can be computationally expensive,
but may be a useful alternative, or it can be used to refine the solution
returned by another method.
References
----------
.. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik,
S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and
C.E. Priebe, "Fast approximate quadratic programming for graph
matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015,
:doi:`10.1371/journal.pone.0121002`
.. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
203-215, :doi:`10.1016/j.patcog.2018.09.014`
.. [3] "2-opt," Wikipedia.
https://en.wikipedia.org/wiki/2-opt
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import quadratic_assignment
>>> A = np.array([[0, 80, 150, 170], [80, 0, 130, 100],
... [150, 130, 0, 120], [170, 100, 120, 0]])
>>> B = np.array([[0, 5, 2, 7], [0, 0, 3, 8],
... [0, 0, 0, 3], [0, 0, 0, 0]])
>>> res = quadratic_assignment(A, B)
>>> print(res)
col_ind: array([0, 3, 2, 1])
fun: 3260
nit: 9
    To see the relationship between the returned ``col_ind`` and ``fun``,
use ``col_ind`` to form the best permutation matrix found, then evaluate
the objective function :math:`f(P) = trace(A^T P B P^T )`.
>>> n = A.shape[0]
>>> perm = res['col_ind']
>>> P = np.eye(n, dtype=int)[perm]
>>> fun = np.trace(A.T @ P @ B @ P.T)
>>> print(fun)
3260
Alternatively, to avoid constructing the permutation matrix explicitly,
directly permute the rows and columns of the distance matrix.
>>> fun = np.trace(A.T @ B[perm][:, perm])
>>> print(fun)
3260
Although not guaranteed in general, ``quadratic_assignment`` happens to
have found the globally optimal solution.
>>> from itertools import permutations
>>> perm_opt, fun_opt = None, np.inf
>>> for perm in permutations([0, 1, 2, 3]):
... perm = np.array(perm)
... fun = np.trace(A.T @ B[perm][:, perm])
... if fun < fun_opt:
... fun_opt, perm_opt = fun, perm
>>> print(np.array_equal(perm_opt, res['col_ind']))
True
Here is an example for which the default method,
:ref:`'faq' <optimize.qap-faq>`, does not find the global optimum.
>>> A = np.array([[0, 5, 8, 6], [5, 0, 5, 1],
... [8, 5, 0, 2], [6, 1, 2, 0]])
>>> B = np.array([[0, 1, 8, 4], [1, 0, 5, 2],
... [8, 5, 0, 5], [4, 2, 5, 0]])
>>> res = quadratic_assignment(A, B)
>>> print(res)
col_ind: array([1, 0, 3, 2])
fun: 178
nit: 13
If accuracy is important, consider using :ref:`'2opt' <optimize.qap-2opt>`
to refine the solution.
>>> guess = np.array([np.arange(A.shape[0]), res.col_ind]).T
>>> res = quadratic_assignment(A, B, method="2opt",
... options = {'partial_guess': guess})
>>> print(res)
col_ind: array([1, 2, 3, 0])
fun: 176
nit: 17
"""
if options is None:
options = {}
method = method.lower()
methods = {"faq": _quadratic_assignment_faq}
if method not in methods:
raise ValueError(f"method {method} must be in {methods}.")
res = methods[method](A, B, **options)
return res
def _calc_score(A, B, S, perm):
# equivalent to objective function but avoids matmul
return np.sum(A * B[perm][:, perm]) + np.sum(S[np.arange(len(S)), perm])
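# Added sanity-check note (illustrative, not part of the adapted scipy code): for a
# permutation `perm` and P = np.eye(len(perm))[perm],
#   np.trace(A.T @ P @ B @ P.T) == np.sum(A * B[perm][:, perm])
#   np.trace(S.T @ P)           == np.sum(S[np.arange(len(S)), perm])
# so _calc_score returns trace(A^T P B P^T) + trace(S^T P) without building P.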
def _common_input_validation(A, B, partial_match):
A = np.atleast_2d(A)
B = np.atleast_2d(B)
if partial_match is None:
partial_match = np.array([[], []]).T
partial_match = np.atleast_2d(partial_match).astype(int)
msg = None
if A.shape[0] != A.shape[1]:
msg = "`A` must be square"
elif B.shape[0] != B.shape[1]:
msg = "`B` must be square"
elif A.ndim != 2 or B.ndim != 2:
msg = "`A` and `B` must have exactly two dimensions"
elif A.shape != B.shape:
msg = "`A` and `B` matrices must be of equal size"
elif partial_match.shape[0] > A.shape[0]:
msg = "`partial_match` can have only as many seeds as there are nodes"
elif partial_match.shape[1] != 2:
msg = "`partial_match` must have two columns"
elif partial_match.ndim != 2:
msg = "`partial_match` must have exactly two dimensions"
elif (partial_match < 0).any():
msg = "`partial_match` must contain only positive indices"
elif (partial_match >= len(A)).any():
msg = "`partial_match` entries must be less than number of nodes"
elif not len(set(partial_match[:, 0])) == len(partial_match[:, 0]) or not len(
set(partial_match[:, 1])
) == len(partial_match[:, 1]):
msg = "`partial_match` column entries must be unique"
if msg is not None:
raise ValueError(msg)
return A, B, partial_match
def _quadratic_assignment_faq(
A,
B,
maximize=False,
partial_match=None,
S=None,
rng=None,
P0="barycenter",
shuffle_input=False,
maxiter=30,
tol=0.03,
):
r"""
Solve the quadratic assignment problem (approximately).
This function solves the Quadratic Assignment Problem (QAP) and the
Graph Matching Problem (GMP) using the Fast Approximate QAP Algorithm
(FAQ) [1]_.
Quadratic assignment solves problems of the following form:
.. math::
\min_P & \ {\ \text{trace}(A^T P B P^T)}\\
\mbox{s.t. } & {P \ \epsilon \ \mathcal{P}}\\
where :math:`\mathcal{P}` is the set of all permutation matrices,
and :math:`A` and :math:`B` are square matrices.
Graph matching tries to *maximize* the same objective function.
This algorithm can be thought of as finding the alignment of the
nodes of two graphs that minimizes the number of induced edge
disagreements, or, in the case of weighted graphs, the sum of squared
edge weight differences.
Note that the quadratic assignment problem is NP-hard, is not
known to be solvable in polynomial time, and is computationally
intractable. Therefore, the results given are approximations,
not guaranteed to be exact solutions.
Parameters
----------
A : 2d-array, square
The square matrix :math:`A` in the objective function above.
B : 2d-array, square
The square matrix :math:`B` in the objective function above.
method : str in {'faq', '2opt'} (default: 'faq')
The algorithm used to solve the problem. This is the method-specific
documentation for 'faq'.
:ref:`'2opt' <optimize.qap-2opt>` is also available.
Options
-------
maximize : bool (default = False)
Setting `maximize` to ``True`` solves the Graph Matching Problem (GMP)
        rather than the Quadratic Assignment Problem (QAP). This is
accomplished through trivial negation of the objective function.
rng : {None, int, `~np.random.RandomState`, `~np.random.Generator`}
This parameter defines the object to use for drawing random
variates.
If `rng` is ``None`` the `~np.random.RandomState` singleton is
used.
If `rng` is an int, a new ``RandomState`` instance is used,
seeded with `rng`.
If `rng` is already a ``RandomState`` or ``Generator``
instance, then that object is used.
Default is None.
partial_match : 2d-array of integers, optional, (default = None)
Allows the user to fix part of the matching between the two
matrices. In the literature, a partial match is also known as a
"seed".
Each row of `partial_match` specifies the indices of a pair of
corresponding nodes, that is, node ``partial_match[i, 0]`` of `A` is
matched to node ``partial_match[i, 1]`` of `B`. Accordingly,
``partial_match`` is an array of size ``(m , 2)``, where ``m`` is
not greater than the number of nodes, :math:`n`.
S : 2d-array, square
A similarity matrix. Should be same shape as ``A`` and ``B``.
        Note: the scale of `S` may affect the weight placed on the term
        :math:`\text{trace}(S^T P)` relative to :math:`\text{trace}(A^T PBP^T)`
during the optimization process.
P0 : 2d-array, "barycenter", or "randomized" (default = "barycenter")
The initial (guess) permutation matrix or search "position"
`P0`.
`P0` need not be a proper permutation matrix;
however, it must be :math:`m' x m'`, where :math:`m' = n - m`,
and it must be doubly stochastic: each of its rows and columns must
sum to 1.
If unspecified or ``"barycenter"``, the non-informative "flat
doubly stochastic matrix" :math:`J = 1*1^T/m'`, where :math:`1` is
a :math:`m' \times 1` array of ones, is used. This is the "barycenter"
of the search space of doubly-stochastic matrices.
If ``"randomized"``, the algorithm will start from the
randomized initial search position :math:`P_0 = (J + K)/2`,
where :math:`J` is the "barycenter" and :math:`K` is a random
doubly stochastic matrix.
shuffle_input : bool (default = False)
To avoid artificially high or low matching due to inherent
sorting of input matrices, gives users the option
to shuffle the nodes. Results are then unshuffled so that the
returned results correspond with the node order of inputs.
Shuffling may cause the algorithm to be non-deterministic,
unless a random seed is set or an `rng` option is provided.
maxiter : int, positive (default = 30)
        Integer specifying the max number of Frank-Wolfe iterations performed.
tol : float (default = 0.03)
        A threshold for the stopping criterion. Frank-Wolfe
iteration terminates when the change in search position between
iterations is sufficiently small, that is, when the relative Frobenius
norm, :math:`\frac{||P_{i}-P_{i+1}||_F}{\sqrt{len(P_{i})}} \leq tol`,
where :math:`i` is the iteration number.
Returns
-------
res : OptimizeResult
A :class:`scipy.optimize.OptimizeResult` containing the following
fields.
col_ind : 1-D array
An array of column indices corresponding with the best
permutation of the nodes of `B` found.
fun : float
The corresponding value of the objective function.
nit : int
            The number of Frank-Wolfe iterations performed.
Notes
-----
The algorithm may be sensitive to the initial permutation matrix (or
search "position") due to the possibility of several local minima
within the feasible region. A barycenter initialization is more likely to
result in a better solution than a single random initialization. However,
``quadratic_assignment`` calling several times with different random
initializations may result in a better optimum at the cost of longer
total execution time.
Examples
--------
As mentioned above, a barycenter initialization often results in a better
solution than a single random initialization.
>>> np.random.seed(0)
>>> n = 15
>>> A = np.random.rand(n, n)
>>> B = np.random.rand(n, n)
>>> res = quadratic_assignment(A, B) # FAQ is default method
>>> print(res.fun)
46.871483385480545 # may vary
>>> options = {"P0": "randomized"} # use randomized initialization
>>> res = quadratic_assignment(A, B, options=options)
>>> print(res.fun)
47.224831071310625 # may vary
However, consider running from several randomized initializations and
keeping the best result.
>>> res = min([quadratic_assignment(A, B, options=options)
... for i in range(30)], key=lambda x: x.fun)
>>> print(res.fun)
46.671852533681516 # may vary
The '2-opt' method can be used to further refine the results.
>>> options = {"partial_guess": np.array([np.arange(n), res.col_ind]).T}
>>> res = quadratic_assignment(A, B, method="2opt", options=options)
>>> print(res.fun)
46.47160735721583 # may vary
References
----------
.. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik,
S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and
C.E. Priebe, "Fast approximate quadratic programming for graph
matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015,
:doi:`10.1371/journal.pone.0121002`
.. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
203-215, :doi:`10.1016/j.patcog.2018.09.014`
"""
maxiter = operator.index(maxiter)
# ValueError check
A, B, partial_match = _common_input_validation(A, B, partial_match)
msg = None
if isinstance(P0, str) and P0 not in {"barycenter", "randomized"}:
msg = "Invalid 'P0' parameter string"
elif maxiter <= 0:
msg = "'maxiter' must be a positive integer"
elif tol <= 0:
msg = "'tol' must be a positive float"
    elif S is None:
        # no similarity matrix given: fall back to zeros so trace(S^T P) has no effect
        S = np.zeros_like(A, dtype=float)
    elif S.shape[0] != S.shape[1]:
        msg = "`S` must be square"
    elif S.ndim != 2:
        msg = "`S` must have exactly two dimensions"
    elif S.shape != A.shape:
        msg = "`S`, `A`, and `B` matrices must be of equal size"
if msg is not None:
raise ValueError(msg)
rng = check_random_state(rng)
n = A.shape[0] # number of vertices in graphs
n_seeds = partial_match.shape[0] # number of seeds
n_unseed = n - n_seeds
# check outlier cases
if n == 0 or partial_match.shape[0] == n:
score = _calc_score(A, B, S, partial_match[:, 1])
res = {"col_ind": partial_match[:, 1], "fun": score, "nit": 0}
return OptimizeResult(res)
obj_func_scalar = 1
if maximize:
obj_func_scalar = -1
nonseed_B = np.setdiff1d(range(n), partial_match[:, 1])
perm_S = np.copy(nonseed_B)
if shuffle_input:
nonseed_B = rng.permutation(nonseed_B)
# shuffle_input to avoid results from inputs that were already matched
nonseed_A = np.setdiff1d(range(n), partial_match[:, 0])
perm_A = np.concatenate([partial_match[:, 0], nonseed_A])
perm_B = np.concatenate([partial_match[:, 1], nonseed_B])
S = S[:, perm_B]
# definitions according to Seeded Graph Matching [2].
A11, A12, A21, A22 = _split_matrix(A[perm_A][:, perm_A], n_seeds)
B11, B12, B21, B22 = _split_matrix(B[perm_B][:, perm_B], n_seeds)
S22 = S[perm_S, n_seeds:]
# [1] Algorithm 1 Line 1 - choose initialization
if isinstance(P0, str):
# initialize J, a doubly stochastic barycenter
J = np.ones((n_unseed, n_unseed)) / n_unseed
if P0 == "barycenter":
P = J
elif P0 == "randomized":
# generate a nxn matrix where each entry is a random number [0, 1]
# would use rand, but Generators don't have it
# would use random, but old mtrand.RandomStates don't have it
K = rng.uniform(size=(n_unseed, n_unseed))
# Sinkhorn balancing
K = _doubly_stochastic(K)
P = J * 0.5 + K * 0.5
elif isinstance(P0, np.ndarray):
P0 = np.atleast_2d(P0)
_check_init_input(P0, n_unseed)
invert_inds = np.argsort(nonseed_B)
perm_nonseed_B = np.argsort(invert_inds)
P = P0[:, perm_nonseed_B]
else:
msg = "`init` must either be of type str or np.ndarray."
raise TypeError(msg)
const_sum = A21 @ B21.T + A12.T @ B12 + S22
# [1] Algorithm 1 Line 2 - loop while stopping criteria not met
for n_iter in range(1, maxiter + 1):
# [1] Algorithm 1 Line 3 - compute the gradient of f(P) = -tr(APB^tP^t)
grad_fp = const_sum + A22 @ P @ B22.T + A22.T @ P @ B22
# [1] Algorithm 1 Line 4 - get direction Q by solving Eq. 8
_, cols = linear_sum_assignment(grad_fp, maximize=maximize)
Q = np.eye(n_unseed)[cols]
# [1] Algorithm 1 Line 5 - compute the step size
# Noting that e.g. trace(Ax) = trace(A)*x, expand and re-collect
# terms as ax**2 + bx + c. c does not affect location of minimum
# and can be ignored. Also, note that trace(A@B) = (A.T*B).sum();
# apply where possible for efficiency.
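        # Added note (sketch of the expansion, not from the original source): with
        # R = P - Q, points on the segment between Q and P are P(x) = Q + x*R for
        # x in [0, 1], and the objective restricted to that segment is a quadratic
        # a*x**2 + b*x + c; a collects the R-quadratic part from A22/B22, b collects
        # the R-linear parts from the seed blocks (A21/B21, A12/B12), the A22/B22
        # cross terms and the similarity term S22, and c is constant and dropped.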
R = P - Q
b21 = ((R.T @ A21) * B21).sum()
b12 = ((R.T @ A12.T) * B12.T).sum()
AR22 = A22.T @ R
BR22 = B22 @ R.T
b22a = (AR22 * B22.T[cols]).sum()
b22b = (A22 * BR22[cols]).sum()
s = (S22 * R).sum()
a = (AR22.T * BR22).sum()
b = b21 + b12 + b22a + b22b + s
        # critical point of ax^2 + bx + c is at x = -b/(2*a)
# if a * obj_func_scalar > 0, it is a minimum
# if minimum is not in [0, 1], only endpoints need to be considered
if a * obj_func_scalar > 0 and 0 <= -b / (2 * a) <= 1:
alpha = -b / (2 * a)
else:
alpha = np.argmin([0, (b + a) * obj_func_scalar])
# [1] Algorithm 1 Line 6 - Update P
P_i1 = alpha * P + (1 - alpha) * Q
if np.linalg.norm(P - P_i1) / np.sqrt(n_unseed) < tol:
P = P_i1
break
P = P_i1
# [1] Algorithm 1 Line 7 - end main loop
# [1] Algorithm 1 Line 8 - project onto the set of permutation matrices
_, col = linear_sum_assignment(-P)
perm = np.concatenate((np.arange(n_seeds), col + n_seeds))
unshuffled_perm = np.zeros(n, dtype=int)
unshuffled_perm[perm_A] = perm_B[perm]
score = _calc_score(A, B, S, unshuffled_perm)
res = {"col_ind": unshuffled_perm, "fun": score, "nit": n_iter}
return OptimizeResult(res)
def _check_init_input(P0, n):
row_sum = np.sum(P0, axis=0)
col_sum = np.sum(P0, axis=1)
tol = 1e-3
msg = None
if P0.shape != (n, n):
msg = "`P0` matrix must have shape m' x m', where m'=n-m"
elif (
(~np.isclose(row_sum, 1, atol=tol)).any()
or (~np.isclose(col_sum, 1, atol=tol)).any()
or (P0 < 0).any()
):
msg = "`P0` matrix must be doubly stochastic"
if msg is not None:
raise ValueError(msg)
def _split_matrix(X, n):
# definitions according to Seeded Graph Matching [2].
upper, lower = X[:n], X[n:]
return upper[:, :n], upper[:, n:], lower[:, :n], lower[:, n:]
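# Added note (illustration, not in the original): with n = number of seeds,
# _split_matrix partitions
#     X = [[X11, X12],
#          [X21, X22]]
# where X11 is the seed-seed block (n x n), X12 seed-to-nonseed, X21
# nonseed-to-seed, and X22 the nonseed-nonseed block iterated on by FAQ.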
def _doubly_stochastic(P, tol=1e-3):
# Adapted from @btaba implementation
# https://github.com/btaba/sinkhorn_knopp
# of Sinkhorn-Knopp algorithm
# https://projecteuclid.org/euclid.pjm/1102992505
max_iter = 1000
c = 1 / P.sum(axis=0)
r = 1 / (P @ c)
P_eps = P
for it in range(max_iter):
if (np.abs(P_eps.sum(axis=1) - 1) < tol).all() and (
np.abs(P_eps.sum(axis=0) - 1) < tol
).all():
# All column/row sums ~= 1 within threshold
break
c = 1 / (r @ P)
r = 1 / (P @ c)
P_eps = r[:, None] * P * c
return P_eps
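# Added usage sketch (illustrative only) for the Sinkhorn-Knopp balancing above:
#   K = np.random.default_rng(0).uniform(size=(5, 5))
#   D = _doubly_stochastic(K)
#   # D.sum(axis=0) and D.sum(axis=1) are now all within 1e-3 of 1.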
| 41.949367 | 82 | 0.609578 |
7946b20e92530ddc84221f0e9f2b6c02461e990f | 1,035 | py | Python | fluxcd_teams_bot/parser.py | binkhq/fluxcd-teams-webhook | 617202b003cec7c75e77c691ab002ee056f537d5 | [
"Apache-2.0"
] | null | null | null | fluxcd_teams_bot/parser.py | binkhq/fluxcd-teams-webhook | 617202b003cec7c75e77c691ab002ee056f537d5 | [
"Apache-2.0"
] | 1 | 2021-03-02T09:55:14.000Z | 2021-03-02T09:55:14.000Z | fluxcd_teams_bot/parser.py | binkhq/fluxcd-teams-webhook | 617202b003cec7c75e77c691ab002ee056f537d5 | [
"Apache-2.0"
] | null | null | null | from typing import Any, Dict, Tuple, Union, cast
from fluxcd_teams_bot.cards import AutoreleaseCard, Card, ErrorCard
def split_workload_id(workload_id: str) -> Tuple[str, str]:
return cast(Tuple[str, str], workload_id.split(":", 1))
def parse(data: Dict[str, Any]) -> Union[Card, None]:
type_ = data.get("type")
if type_ == "sync":
if "errors" in data["metadata"]:
# Sync Error Card
e_card = ErrorCard()
for error in data["metadata"]["errors"]:
namespace, resource = split_workload_id(error["ID"])
e_card.add_error(namespace, resource, error["Path"], error["Error"])
return e_card
elif type_ == "autorelease":
a_card = AutoreleaseCard()
for change in data["metadata"]["spec"]["Changes"]:
namespace, resource = split_workload_id(change["WorkloadID"])
a_card.add_autorelease(namespace, resource, change["Container"]["Image"], change["ImageID"])
return a_card
return None
| 32.34375 | 104 | 0.619324 |
7946b2802b599f2ed89792ed740bb1e3186b9166 | 3,339 | py | Python | src/programy/dynamic/maps/roman.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | null | null | null | src/programy/dynamic/maps/roman.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | null | null | null | src/programy/dynamic/maps/roman.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
] | 4 | 2019-04-01T15:42:23.000Z | 2020-11-05T08:14:27.000Z | """
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.dynamic.maps.map import DynamicMap
# Code stolen from http://code.activestate.com/recipes/81611-roman-numerals/
NUMERAL_MAP = zip(
(1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1),
('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
)
class MapRomanToDecimal(DynamicMap):
NAME = "ROMANTODEC"
def __init__(self, config):
DynamicMap.__init__(self, config)
def map_value(self, client_context, input_value):
if not isinstance(input_value, str):
raise TypeError("expected string, got %s" % type(input_value))
input_value = input_value.upper()
nums = ['M', 'D', 'C', 'L', 'X', 'V', 'I']
ints = [1000, 500, 100, 50, 10, 5, 1]
places = []
for char in input_value:
if char not in nums:
raise ValueError("input_value is not a valid roman numeral: %s" % input_value)
charnum = 0
for char in input_value:
value = ints[nums.index(char)]
# If the next place holds a larger number, this value is negative.
try:
nextvalue = ints[nums.index(input_value[charnum + 1])]
if nextvalue > value:
value *= -1
except IndexError:
# there is no next place.
pass
places.append(value)
charnum += 1
total = 0
for num in places:
total += num
return str(total)
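    # Added worked example of the subtractive rule above: for "XIV" the collected
    # place values are [10, -1, 5] (the 'I' is negated because the following 'V'
    # is larger), giving 10 - 1 + 5 = 14.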
class MapDecimalToRoman(DynamicMap):
NAME = "DECTOROMAN"
def __init__(self, config):
DynamicMap.__init__(self, config)
def map_value(self, client_context, input_value):
input_value = int(input_value)
if not 0 < input_value < 4000:
raise ValueError("Argument must be between 1 and 3999")
ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
result = ""
num = 0
for num_str in nums:
count = int(input_value / ints[num])
result += num_str * count
input_value -= ints[num] * count
num += 1
return result
| 38.825581 | 120 | 0.622342 |
7946b28f799ad654e97e4b6d307b7ea0b1693f9d | 1,517 | py | Python | app/libs/utils.py | damnever/2L | 35dab73106e5879155a647b31c81ae5dea18b89d | [
"BSD-3-Clause"
] | 1 | 2019-03-11T12:16:30.000Z | 2019-03-11T12:16:30.000Z | app/libs/utils.py | damnever/2L | 35dab73106e5879155a647b31c81ae5dea18b89d | [
"BSD-3-Clause"
] | 1 | 2017-04-16T02:11:58.000Z | 2017-04-16T02:11:58.000Z | app/libs/utils.py | damnever/2L | 35dab73106e5879155a647b31c81ae5dea18b89d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import re
import os
import base64
import uuid
import importlib
import pkgutil
import hashlib
from app.settings import ROOT_DIR
_AT_RE = re.compile(r'@(?P<name>[^ ,\.;:!\?"\':]+)', re.M | re.S)
def load_module_attrs(pkg_path, func, recursive=False):
"""Get attributes from modules, use ``func`` to filter attributes,
``func`` must return a list.
"""
attrs = list()
root_pkg = os.path.basename(ROOT_DIR)
pkg_name = root_pkg + pkg_path.split(root_pkg)[1].replace('/', '.')
for _, name, ispkg in pkgutil.iter_modules([pkg_path]):
if ispkg and recursive:
next_path = os.path.join(pkg_path, name)
attrs.extend(load_module_attrs(next_path, func, recursive))
continue
module = importlib.import_module('.' + name, pkg_name)
attr = func(module)
if attr:
attrs.extend(attr)
return attrs
def encrypt_password(password):
"""Yes, I do know what I am thinking..."""
mid = ''.join([hex(ord(w))[2:] for w in password])
return hashlib.sha1(mid).hexdigest()
def gen_token():
uuid4bytes = lambda: uuid.uuid4().bytes
return base64.b64encode(uuid4bytes() + uuid4bytes())
def at_content(content, url='/user/'):
"""Find all users and convert @someone to [@someone](<url>someone)."""
users = _AT_RE.findall(content)
val = _AT_RE.sub(r'[@\1]({0}\1)'.format(url), content)
return users, val
| 26.614035 | 74 | 0.642716 |
7946b30fa0a94fcfbd9cc041fc64da87d54e7ce1 | 3,781 | py | Python | configs/foveabox/fovea_align_gn_r50_fpn_2gpu_1x_5cut.py | wangbingo/mmdetection | a1fb7f9d882e8bbf0138aead7a143e88e93d9896 | [
"Apache-2.0"
] | null | null | null | configs/foveabox/fovea_align_gn_r50_fpn_2gpu_1x_5cut.py | wangbingo/mmdetection | a1fb7f9d882e8bbf0138aead7a143e88e93d9896 | [
"Apache-2.0"
] | null | null | null | configs/foveabox/fovea_align_gn_r50_fpn_2gpu_1x_5cut.py | wangbingo/mmdetection | a1fb7f9d882e8bbf0138aead7a143e88e93d9896 | [
"Apache-2.0"
] | null | null | null | # model settings
model = dict(
type='FOVEA',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=4, ###
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
num_outs=5,
add_extra_convs=True),
bbox_head=dict(
type='FoveaHead',
num_classes=2, ###
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
base_edge_list=[16, 32, 64, 128, 256],
scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
sigma=0.4,
with_deform=True,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=1.50,
alpha=0.4,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)))
# training and testing settings
train_cfg = dict()
test_cfg = dict(
nms_pre=1000,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = '/root/docker_mounts_ssd/pap_work/trainset/pos_cocotype_cut_largest_20per_5times_20191115/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1400, 1400), keep_ratio=True), ###
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1100, 1100), ###
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=1,
workers_per_gpu=0,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/train_80pct.json',
img_prefix=data_root + 'images/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/train_20pct.json',
img_prefix=data_root + 'images/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/fovea_align_gn_r50_fpn_2gpu_1x_5cut'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 31.508333 | 103 | 0.604866 |
7946b4c1492344d18620cf2105e8dc75de3694f0 | 9,761 | py | Python | scripts/cli/parsing/shared.py | scherroman/mvgen | 0e79079b3fb71e94c67d48fd5599b6c43602d3b5 | [
"MIT"
] | 9 | 2016-11-28T00:54:57.000Z | 2016-12-22T21:21:17.000Z | scripts/cli/parsing/shared.py | scherroman/mvgen | 0e79079b3fb71e94c67d48fd5599b6c43602d3b5 | [
"MIT"
] | null | null | null | scripts/cli/parsing/shared.py | scherroman/mvgen | 0e79079b3fb71e94c67d48fd5599b6c43602d3b5 | [
"MIT"
] | null | null | null | import argparse
from fractions import Fraction
from mugen import VideoFilter
from mugen.video.filters import DEFAULT_VIDEO_FILTERS
from mugen.video.io.VideoWriter import VideoWriter
from scripts.cli.events import AudioEventsMode, BeatsMode, OnsetsMode, TargetGroups
DEFAULT_MUSIC_VIDEO_NAME = "music_video"
def get_audio_parser():
audio_parser = argparse.ArgumentParser(add_help=False)
audio_parser.add_argument(
"-a",
"--audio-source",
dest="audio_source",
required=True,
help="The audio file for the music video. Supports any audio format supported by ffmpeg, such as wav, aiff, flac, ogg, mp3, etc...",
)
audio_parser.add_argument(
"-uoa",
"--use-original-audio",
dest="use_original_audio",
action="store_true",
default=False,
help="Whether or not to use the original audio from the video segments for the music video",
)
audio_parser.add_argument(
"-aem",
"--audio-events-mode",
dest="audio_events_mode",
default=AudioEventsMode.BEATS,
help=f"Method of generating events from the audio file. Supported values are {[e.value for e in AudioEventsMode]}",
)
audio_parser.add_argument(
"-bm",
"--beats-mode",
dest="beats_mode",
default=BeatsMode.BEATS,
help=f"Method of generating beat events from the audio file. Supported values are {[e.value for e in BeatsMode]}",
)
audio_parser.add_argument(
"-om",
"--onsets-mode",
dest="onsets_mode",
default=OnsetsMode.ONSETS,
help=f"Method of generating onset events from the audio file. Supported values are {[e.value for e in OnsetsMode]}",
)
audio_parser.add_argument(
"-ac",
"--audio-codec",
dest="audio_codec",
default=VideoWriter.DEFAULT_AUDIO_CODEC,
help="The audio codec for the music video if --use-original-audio is enabled. Supports any codec supported by moviepy.",
)
audio_parser.add_argument(
"-ab",
"--audio-bitrate",
dest="audio_bitrate",
type=int,
default=VideoWriter.DEFAULT_AUDIO_BITRATE,
help="The audio bitrate (kbps) for the music video if --use-original-audio is enabled",
)
return audio_parser
def get_video_parser():
video_parser = argparse.ArgumentParser(add_help=False)
video_parser.add_argument(
"-vn",
"--video-name",
dest="video_name",
default=DEFAULT_MUSIC_VIDEO_NAME,
help="The name for the music video. On subsequent runs, the program will output <music_video_name>_0, <music_video_name>_1, etc...",
)
video_parser.add_argument(
"-vf",
"--video-filters",
dest="video_filters",
nargs="+",
default=DEFAULT_VIDEO_FILTERS,
help=f"Video filters that each segment in the music video must pass. Supported values are {[filter.name for filter in VideoFilter]}",
)
video_parser.add_argument(
"-evf",
"--exclude-video-filters",
dest="exclude_video_filters",
nargs="+",
help="Video filters to exclude from the default video filters. See video_filters for supported values",
)
video_parser.add_argument(
"-ivf",
"--include-video-filters",
dest="include_video_filters",
nargs="+",
help="Video filters to include in addition to the default video filters. See video_filters for supported values",
)
video_parser.add_argument(
"-vpre",
"--video-preset",
dest="video_preset",
default=VideoWriter.DEFAULT_VIDEO_PRESET,
help="Tunes the time that FFMPEG will spend optimizing compression while writing the music video to file. See FFMPEG documentation for more info",
)
video_parser.add_argument(
"-vcod",
"--video-codec",
dest="video_codec",
default=VideoWriter.DEFAULT_VIDEO_CODEC,
help="The video codec for the music video. Supports any codec supported by FFMPEG",
)
video_parser.add_argument(
"-vcrf",
"--video-crf",
dest="video_crf",
type=int,
default=VideoWriter.DEFAULT_VIDEO_CRF,
help="The crf quality value for the music video. Takes an integer from 0 (lossless) to 51 (lossy)",
)
video_parser.add_argument(
"-vdim",
"--video-dimensions",
dest="video_dimensions",
type=int,
nargs=2,
help="""The pixel dimensions for the music video, width and height.
All video segments will be resized (cropped and/or scaled) appropriately
to match these dimensions. Otherwise, the largest dimensions available are used.
Takes width then height integer values separated by spaces e.g., 1920 1080""",
)
video_parser.add_argument(
"-vasp",
"--video-aspect-ratio",
dest="video_aspect_ratio",
type=Fraction,
help="The aspect ratio for the music video (overruled by --dimensions). Takes a fraction. i.e.) 16/9",
)
video_parser.add_argument(
"-ss",
"--save-segments",
dest="save_segments",
action="store_true",
default=False,
help="Save all the individual segments that compose the music video.",
)
video_parser.add_argument(
"-srs",
"--save-rejected-segments",
dest="save_rejected_segments",
action="store_true",
default=False,
help="Save all rejected segments that did not pass filters.",
)
return video_parser
def get_event_parser():
event_parser = argparse.ArgumentParser(add_help=False)
event_parser.add_argument(
"-d",
"--duration",
dest="duration",
type=float,
help="Manually set the duration of the music video",
)
event_parser.add_argument(
"-el",
"--event-locations",
dest="event_locations",
type=float,
nargs="+",
help="""Manually enter event locations in seconds for the audio file.
Usually this corresponds to beats in the music, or any location where one feels
there should be a cut between clips in the music video.
If this option is specified alongside --audio-events-mode, both will be combined.
Takes a list of numerical values separated by spaces""",
)
event_parser.add_argument(
"-eo",
"--events-offset",
dest="events_offset",
type=float,
help="Global offset for event locations in seconds."
"If using -es/--events-speed and events are not showing up where desired, try using -eso/--events-speed-offset before this option.",
)
event_parser.add_argument(
"-es",
"--events-speed",
dest="events_speed",
type=Fraction,
help="Global speed up or slow down for events in the music video. "
"Should be of the form x or 1/x, where x is a natural number. "
"(e.g.) 2 for double speed, or 1/2 for half speed. "
"For slowdown multipliers, events are merged towards the left "
"(e.g.) Given beat events 1, 2, 3, 4, a slowdown of 1/2 would result in preserving events 1 and 3",
)
event_parser.add_argument(
"-eso",
"--events-speed-offset",
dest="events_speed_offset",
type=int,
help="Offset for the merging of events on a slowdown speed multiplier. Takes an integer, with a max offset of x - 1 for a slowdown of 1/x",
)
event_parser.add_argument(
"-gebs",
"--group-events-by-slices",
dest="group_events_by_slices",
type=slice,
nargs="+",
help="""Group events by one or more slices.
Must be of the form start,stop or (start,stop).
Events will be grouped starting at "start", up to but not including "stop".
Groups explicitly specified by slices will become "selected" groups.
Any surrounding "unselected" groups will be filled in automatically.
e.g.) If there are 40 events, a slice of (20,30) results in three groups
(0,20) (20,30) (30,39), with one selected group (20,30)""",
)
event_parser.add_argument(
"-gebt",
"--group-events-by-type",
dest="group_events_by_type",
nargs="*",
help="""Group events by type. Useful for modes like the weak_beats beats mode.
e.g.) If our events are: <10 WeakBeat, 20 Beat, 10 WeakBeat>, passing this option
with WeakBeat will result in three groups (0,9) (9,29) (29,39),
with two selected groups (0,9) (29,39)""",
)
event_parser.add_argument(
"-tg",
"--target-groups",
dest="target_groups",
default=TargetGroups.SELECTED,
help=f"""Which groups --group-by modifiers should apply to.
Either all groups, only selected groups, or only unselected groups.
Supported values are {[e.value for e in TargetGroups]}""",
)
event_parser.add_argument(
"-gs",
"--group-speeds",
dest="group_speeds",
type=Fraction,
nargs="+",
default=[],
help=f"""Speed multipliers for event groups created by '--group-by' options.
e.g.) 1/2 1/4 1/8 will speed multiply all of (0,20) (20,30) (30,39), in order.
But 1/2 with --target-groups {TargetGroups.SELECTED} will speed multiply only (20,30)""",
)
event_parser.add_argument(
"-gso",
"--group-speed-offsets",
dest="group_speed_offsets",
type=int,
default=[],
nargs="+",
help="Speed multiplier offsets for event group speeds",
)
return event_parser
| 36.151852 | 154 | 0.625858 |
7946b52178c47db508f562dea10453b4b16d4b30 | 562 | py | Python | py3plex/algorithms/infomap/examples/python/example-multiplex.py | awesome-archive/Py3plex | a099acb992441c1630208ba13694acb8e2a38895 | [
"BSD-3-Clause"
] | 1 | 2020-02-20T07:37:02.000Z | 2020-02-20T07:37:02.000Z | py3plex/algorithms/infomap/examples/python/example-multiplex.py | awesome-archive/Py3plex | a099acb992441c1630208ba13694acb8e2a38895 | [
"BSD-3-Clause"
] | null | null | null | py3plex/algorithms/infomap/examples/python/example-multiplex.py | awesome-archive/Py3plex | a099acb992441c1630208ba13694acb8e2a38895 | [
"BSD-3-Clause"
] | null | null | null | from infomap import infomap
infomapWrapper = infomap.MemInfomap("--two-level --expanded")
# from (layer, node) to (layer, node) weight
infomapWrapper.addMultiplexLink(2, 1, 1, 2, 1.0)
infomapWrapper.addMultiplexLink(1, 2, 2, 1, 1.0)
infomapWrapper.addMultiplexLink(3, 2, 2, 3, 1.0)
infomapWrapper.run()
tree = infomapWrapper.tree
print("Found %d modules with codelength: %f" % (tree.numTopModules(), tree.codelength()))
print("\n#layer node module:")
for node in tree.leafIter():
print("%d %d %d" % (node.stateIndex, node.physIndex, node.moduleIndex()))
| 28.1 | 89 | 0.718861 |
7946b522adf98416d3c53ee4e38ef273a70082dc | 6,399 | py | Python | regina_normalizer/ordinal_big_tuples.py | grammatek/regina_normalizer | 61ffeafcf1b967b44a3a9f99727013779bfeba13 | [
"Apache-2.0"
] | null | null | null | regina_normalizer/ordinal_big_tuples.py | grammatek/regina_normalizer | 61ffeafcf1b967b44a3a9f99727013779bfeba13 | [
"Apache-2.0"
] | null | null | null | regina_normalizer/ordinal_big_tuples.py | grammatek/regina_normalizer | 61ffeafcf1b967b44a3a9f99727013779bfeba13 | [
"Apache-2.0"
] | null | null | null | from . import number_patterns as npa
from . import tuple_rules as tp
ordinal_big_tuples = [(npa.ones_ptrn_no11 + "1\.000\.0([01][1-9]|10)\.$", '.*', 'millions', ' einmilljónasta og'),
("^1\.000\.0([01][1-9]|10)\.$", '.*', 'millions',' milljónasta og'),
(npa.ones_ptrn_no11 + "1\.000\.000\.$", '.*', 'millions', ' einmilljónasta'),
("^1\.000\.000\.$", '.*', 'millions', ' milljónasta'),
(npa.tns_ptrn + "10\.000\.0([01][1-9]|10)\.$", '.*', 'ten millions', ' tímilljónasta og'),
(npa.tns_ptrn + "10\.000\.000\.$", '.*', 'ten millions', ' tímilljónasta'),
(npa.hndrds_ptrn + "100\.000\.0([01][1-9]|10)\.$", '.*', 'hundred millions', ' eitt hundraðmilljónasta og'),
("^100\.000\.0([01][1-9]|10)\.$", '.*', 'hundred millions', 'hundraðmilljónasta og'),
(npa.hndrds_ptrn + "100(\.000){2}\.$", '.*', 'hundred millions', ' eitt hundraðmilljónasta'),
("^100(\.000){2}\.$", '.*', 'hundred millions', 'hundraðmilljónasta'),
(npa.ones_ptrn_no11 + "1(\.000){2}\.0([01][1-9]|10)\.$", '.*', 'billions', ' einmilljarðasta og'),
("^1(\.000){2}\.0([01][1-9]|10)\.$", '.*', 'billions', 'milljarðasta og'),
(npa.tns_ptrn + "10(\.000){2}\.0([01][1-9]|10)\.$", '.*', 'ten billions', 'tímilljarðasta og'),
(npa.hndrds_ptrn + "100(\.000){2}\.0([01][1-9]|10)\.$", '.*', 'hundred billions', ' eitt hundrað milljarðasta og'),
("^100(\.000){2}\.0([01][1-9]|10)\.$", '.*', 'hundred billions', 'hundraðmilljarðasta og'),
(npa.ones_ptrn_no11 + "1(\.000){3}\.$", '.*', 'billions', ' einmilljarðasta'),
("^1(\.000){3}\.$", '.*', 'billions', 'milljarðasta'),
(npa.tns_ptrn + "10(\.000){3}\.$", '.*', 'ten billions', 'tímilljarðasta'),
(npa.hndrds_ptrn + "100(\.000){3}\.$", '.*', 'hundred billions', ' eitt hundraðmilljarðasta'),
("^100(\.000){3}\.$", '.*', 'hundred billions', 'hundraðmilljarðasta')]
for rule, letter in tp.dozens_ordinal_letters:
ordinal_big_tuples.append((npa.ones_ptrn_no11 + "1\.000\.0([01][1-9]|10)\.$", rule, 'millions', ' einmilljón' + letter + ' og'))
ordinal_big_tuples.append(("^1\.000\.0([01][1-9]|10)\.$", rule, 'millions', ' milljón' + letter + ' og'))
ordinal_big_tuples.append((npa.ones_ptrn_no11 + "1(\.000){2}\.$", rule, 'millions', ' einmilljón' + letter))
ordinal_big_tuples.append(("^1(\.000){2}\.$", rule, 'millions', ' milljón' + letter))
ordinal_big_tuples.append((npa.tns_ptrn + "10\.000\.0([01][1-9]|10)\.$", rule, 'ten millions', ' tímilljón'+ letter + ' og'))
ordinal_big_tuples.append((npa.tns_ptrn + "10(\.000){2}\.$", rule, 'ten millions', ' tímilljón'+ letter))
ordinal_big_tuples.append((npa.hndrds_ptrn + "100\.000\.0([01][1-9]|10)\.$", rule, 'hundred millions', ' eitt hundraðmilljón' + letter + ' og'))
ordinal_big_tuples.append(("^100\.000\.0([01][1-9]|10)\.$", rule, 'hundred millions', 'hundraðmilljón' + letter + ' og'))
ordinal_big_tuples.append((npa.hndrds_ptrn + "100(\.000){2}\.$", rule, 'hundred millions', ' eitt hundraðmilljón' + letter))
ordinal_big_tuples.append(("^100(\.000){2}\.$", rule, 'hundred millions', 'hundraðmilljón' + letter))
ordinal_big_tuples.append((npa.ones_ptrn_no11 + "1(\.000){2}\.0([01][1-9]|10)\.$", rule, 'billions', ' einmilljarð' + letter + ' og'))
ordinal_big_tuples.append(("^1(\.000){2}\.0([01][1-9]|10)\.$", rule, 'billions', 'milljarð' + letter + ' og'))
ordinal_big_tuples.append((npa.tns_ptrn + "10(\.000){2}\.0([01][1-9]|10)\.$", rule, 'ten billions', ' tímilljarð' + letter + ' og'))
ordinal_big_tuples.append((npa.hndrds_ptrn + "100(\.000){2}\.0([01][1-9]|10)\.$", rule, 'hundred billions', ' eitt hundraðmilljarð' + letter + ' og'))
ordinal_big_tuples.append(("^100(\.000){2}\.0([01][1-9]|10)\.$", rule, 'hundred billions', 'hundraðmilljarð' + letter + ' og'))
ordinal_big_tuples.append((npa.ones_ptrn_no11 + "1(\.000){3}\.$", rule, 'billions', ' einmilljarð' + letter))
ordinal_big_tuples.append(("^1(\.000){3}\.$", rule, 'billions', 'milljarð' + letter))
ordinal_big_tuples.append((npa.tns_ptrn + "10(\.000){3}\.$", rule, 'ten billions', 'tímilljarð' + letter))
ordinal_big_tuples.append((npa.hndrds_ptrn + "100(\.000){3}\.$", rule, 'hundred billions', ' eitt hundraðmilljarð' + letter))
ordinal_big_tuples.append(("^100(\.000){3}\.$", rule, 'hundred billions', 'hundrað milljarð' + letter))
for string, number in tp.hundreds_thousands_zip:
ordinal_big_tuples.append((npa.hndrds_ptrn + number + "00\.000\.0([01][1-9]|10)\.$", rule, 'hundred millions', string + ' hundruðmilljón' + letter + ' og'))
ordinal_big_tuples.append((npa.hndrds_ptrn + number + "00(\.000){2}\.$", rule, 'hundred millions', string + ' hundruðmilljón' + letter))
ordinal_big_tuples.append((npa.hndrds_ptrn + number + "00(\.000){2}\.0([01][1-9]|10)\.$", rule, 'hundred billions', string + ' hundruðmilljarð' + letter + ' og'))
ordinal_big_tuples.append((npa.hndrds_ptrn + number + "00(\.000){3}\.$", rule, 'hundred billions', string + ' hundruðmilljarð' + letter))
for string, number in tp.dozens_zip:
ordinal_big_tuples.append((npa.tns_ptrn + number + "0\.000\.0([01][1-9]|10)\.$", rule, 'ten millions', string + 'milljón'+ letter + ' og'))
ordinal_big_tuples.append((npa.tns_ptrn + number + "0(\.000){2}\.$", rule, 'ten millions', string + 'milljón'+ letter))
ordinal_big_tuples.append((npa.tns_ptrn + number + "0(\.000){2}\.0([01][1-9]|10)\.$", rule, 'ten billions', string + 'milljarð' + letter + ' og'))
ordinal_big_tuples.append((npa.tns_ptrn + number + "0(\.000){3}\.$", rule, 'ten billions', string + 'milljarð' + letter))
for string, number in tp.mb_ordinal_zip:
ordinal_big_tuples.append((npa.ones_ptrn_11 + number + "\.000\.0([01][1-9]|10)\.$", rule, 'millions', string + 'milljón' + letter + ' og'))
ordinal_big_tuples.append((npa.ones_ptrn_11 + number + "(\.000){2}\.$", rule, 'millions', string + 'milljón' + letter))
ordinal_big_tuples.append((npa.ones_ptrn_11 + number + "(\.000){2}\.0([01][1-9]|10)\.$", rule, 'billions', string + 'milljarð' + letter + ' og'))
ordinal_big_tuples.append((npa.ones_ptrn_11 + number + "(\.000){3}\.$", rule, 'billions', string + 'milljarð' + letter))
for string, number in tp.tens_zip:
ordinal_big_tuples.append(("^[1-9]?" + number + "\.000\.0([01][1-9]|10)\.$", rule, 'millions', string + ' milljón' + letter + ' og'))
| 95.507463 | 170 | 0.601031 |
7946b548069a1b8eb421340a3baef4bf4cef5db3 | 843 | py | Python | pelayanan/migrations/0002_auto_20200716_1930.py | diaksizz/Adisatya | 1b20e523aede6ab3e8effb1ca63adf72016a6839 | [
"MIT"
] | null | null | null | pelayanan/migrations/0002_auto_20200716_1930.py | diaksizz/Adisatya | 1b20e523aede6ab3e8effb1ca63adf72016a6839 | [
"MIT"
] | 7 | 2021-03-30T14:04:35.000Z | 2022-01-13T03:07:50.000Z | pelayanan/migrations/0002_auto_20200716_1930.py | diaksizz/Adisatya | 1b20e523aede6ab3e8effb1ca63adf72016a6839 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-07-16 12:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pelayanan', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='pengaduan',
name='oleh',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='pengaduan',
name='respon',
field=models.TextField(blank=True, max_length=200, null=True),
),
migrations.DeleteModel(
name='Respon',
),
]
| 28.1 | 136 | 0.628707 |
7946b6258488efc2b2eadc73559fab954c84dba2 | 5,561 | py | Python | common/trainers/msrvid_trainer.py | karkaroff/castor | 881673f3dadb4f757fdfdf5d2ab9031e08512406 | [
"Apache-2.0"
] | 132 | 2017-04-02T12:31:55.000Z | 2019-03-09T07:53:29.000Z | common/trainers/msrvid_trainer.py | sudipta90/castor | fa2f59535c71a0fb4586afbe543b81ba812c8630 | [
"Apache-2.0"
] | 111 | 2017-04-01T23:00:24.000Z | 2019-03-10T08:29:20.000Z | common/trainers/msrvid_trainer.py | karkaroff/Castor | 881673f3dadb4f757fdfdf5d2ab9031e08512406 | [
"Apache-2.0"
] | 53 | 2017-04-06T01:17:18.000Z | 2019-02-27T03:10:35.000Z | import math
import time
import torch
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
from scipy.stats import pearsonr
from .trainer import Trainer
from utils.serialization import save_checkpoint
class MSRVIDTrainer(Trainer):
def train_epoch(self, epoch):
self.model.train()
total_loss = 0
# since MSRVID doesn't have validation set, we manually leave-out some training data for validation
batches = math.ceil(len(self.train_loader.dataset.examples) / self.batch_size)
start_val_batch = math.floor(0.8 * batches)
left_out_val_a, left_out_val_b = [], []
left_out_val_ext_feats = []
left_out_val_labels = []
for batch_idx, batch in enumerate(self.train_loader):
# msrvid does not contain a validation set, we leave out some training data for validation to do model selection
if batch_idx >= start_val_batch:
left_out_val_a.append(batch.sentence_1)
left_out_val_b.append(batch.sentence_2)
left_out_val_ext_feats.append(batch.ext_feats)
left_out_val_labels.append(batch.label)
continue
self.optimizer.zero_grad()
# Select embedding
sent1, sent2 = self.get_sentence_embeddings(batch)
output = self.model(sent1, sent2, batch.ext_feats)
loss = F.kl_div(output, batch.label, size_average=False)
total_loss += loss.item()
loss.backward()
self.optimizer.step()
if batch_idx % self.log_interval == 0:
self.logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, min(batch_idx * self.batch_size, len(batch.dataset.examples)),
len(batch.dataset.examples),
100. * batch_idx / (len(self.train_loader)), loss.item() / len(batch))
)
self.evaluate(self.train_evaluator, 'train')
if self.use_tensorboard:
self.writer.add_scalar('msrvid/train/kl_div_loss', total_loss / len(self.train_loader.dataset.examples), epoch)
return left_out_val_a, left_out_val_b, left_out_val_ext_feats, left_out_val_labels
def train(self, epochs):
        scheduler = None
        if self.lr_reduce_factor != 1 and self.lr_reduce_factor is not None:
            scheduler = ReduceLROnPlateau(self.optimizer, mode='max', factor=self.lr_reduce_factor, patience=self.patience)
epoch_times = []
prev_loss = -1
best_dev_score = -1
for epoch in range(1, epochs + 1):
start = time.time()
self.logger.info('Epoch {} started...'.format(epoch))
left_out_a, left_out_b, left_out_ext_feats, left_out_label = self.train_epoch(epoch)
# manually evaluating the validating set
all_predictions, all_true_labels = [], []
val_kl_div_loss = 0
for i in range(len(left_out_a)):
# Select embedding
sent1 = self.embedding(left_out_a[i]).transpose(1, 2)
sent2 = self.embedding(left_out_b[i]).transpose(1, 2)
output = self.model(sent1, sent2, left_out_ext_feats[i])
val_kl_div_loss += F.kl_div(output, left_out_label[i], size_average=False).item()
predict_classes = left_out_a[i].new_tensor(torch.arange(0, self.train_loader.dataset.NUM_CLASSES))\
.float().expand(len(left_out_a[i]), self.train_loader.dataset.NUM_CLASSES)
predictions = (predict_classes * output.detach().exp()).sum(dim=1)
true_labels = (predict_classes * left_out_label[i].detach()).sum(dim=1)
all_predictions.append(predictions)
all_true_labels.append(true_labels)
predictions = torch.cat(all_predictions).cpu().numpy()
true_labels = torch.cat(all_true_labels).cpu().numpy()
pearson_r = pearsonr(predictions, true_labels)[0]
val_kl_div_loss /= len(predictions)
if self.use_tensorboard:
self.writer.add_scalar('msrvid/dev/pearson_r', pearson_r, epoch)
for param_group in self.optimizer.param_groups:
self.logger.info('Validation size: %s Pearson\'s r: %s', output.size(0), pearson_r)
self.logger.info('Learning rate: %s', param_group['lr'])
if self.use_tensorboard:
self.writer.add_scalar('msrvid/lr', param_group['lr'], epoch)
self.writer.add_scalar('msrvid/dev/kl_div_loss', val_kl_div_loss, epoch)
break
if scheduler is not None:
scheduler.step(pearson_r)
end = time.time()
duration = end - start
self.logger.info('Epoch {} finished in {:.2f} minutes'.format(epoch, duration / 60))
epoch_times.append(duration)
if pearson_r > best_dev_score:
best_dev_score = pearson_r
save_checkpoint(epoch, self.model.arch, self.model.state_dict(), self.optimizer.state_dict(), best_dev_score, self.model_outfile)
if abs(prev_loss - val_kl_div_loss) <= 0.0005:
self.logger.info('Early stopping. Loss changed by less than 0.0005.')
break
prev_loss = val_kl_div_loss
self.evaluate(self.test_evaluator, 'test')
self.logger.info('Training took {:.2f} minutes overall...'.format(sum(epoch_times) / 60)) | 45.211382 | 145 | 0.617335 |
7946b64b9aae4a515e58772047a42a65db69c0b5 | 37,753 | py | Python | nodebox/gui/mac/__init__.py | nodebox/nodebox-pyobjc | 31c7a95ca24fffdc8f4523278d4b68c330adea8e | [
"MIT"
] | 47 | 2015-03-14T01:44:09.000Z | 2021-11-10T10:28:14.000Z | nodebox/gui/mac/__init__.py | nodebox/nodebox-pyobjc | 31c7a95ca24fffdc8f4523278d4b68c330adea8e | [
"MIT"
] | 4 | 2015-08-20T20:02:32.000Z | 2021-02-10T18:39:11.000Z | nodebox/gui/mac/__init__.py | nodebox/nodebox-pyobjc | 31c7a95ca24fffdc8f4523278d4b68c330adea8e | [
"MIT"
] | 15 | 2015-03-14T01:44:00.000Z | 2020-12-17T16:44:31.000Z | import sys
import os
import traceback, linecache
import re
import objc
import time
import random
from Foundation import *
from AppKit import *
from threading import Thread
from nodebox.gui.mac.ValueLadder import MAGICVAR
from nodebox.gui.mac import PyDETextView
from nodebox.gui.mac.util import errorAlert
from nodebox import util
from nodebox import graphics
# AppleScript enumerator codes for PDF and Quicktime export
PDF = 0x70646678 # 'pdfx'
QUICKTIME = 0x71747878 # 'qt '
VERY_LIGHT_GRAY = NSColor.blackColor().blendedColorWithFraction_ofColor_(0.95, NSColor.whiteColor())
DARKER_GRAY = NSColor.blackColor().blendedColorWithFraction_ofColor_(0.8, NSColor.whiteColor())
from nodebox.gui.mac.dashboard import *
from nodebox.gui.mac.progressbar import ProgressBarController
class ExportCommand(NSScriptCommand):
pass
class OutputFile(object):
def __init__(self, data, isErr=False):
self.data = data
self.isErr = isErr
def write(self, data):
if isinstance(data, str):
try:
data = unicode(data, "utf_8", "replace")
except UnicodeDecodeError:
data = "XXX " + repr(data)
self.data.append((self.isErr, data))
# class defined in NodeBoxDocument.xib
class NodeBoxDocument(NSDocument):
graphicsView = objc.IBOutlet()
outputView = objc.IBOutlet()
textView = objc.IBOutlet()
window = objc.IBOutlet()
variablesController = objc.IBOutlet()
dashboardController = objc.IBOutlet()
animationSpinner = objc.IBOutlet()
# The ExportImageAccessory adds:
exportImageAccessory = objc.IBOutlet()
exportImageFormat = objc.IBOutlet()
exportImagePageCount = objc.IBOutlet()
# The ExportMovieAccessory adds:
exportMovieAccessory = objc.IBOutlet()
exportMovieFrames = objc.IBOutlet()
exportMovieFps = objc.IBOutlet()
# When the PageCount accessory is loaded, we also add:
pageCount = objc.IBOutlet()
pageCountAccessory = objc.IBOutlet()
# When the ExportSheet is loaded, we also add:
exportSheet = objc.IBOutlet()
exportSheetIndicator = objc.IBOutlet()
path = None
exportDir = None
magicvar = None # Used for value ladders.
_code = None
vars = []
movie = None
def windowNibName(self):
return "NodeBoxDocument"
def init(self):
self = super(NodeBoxDocument, self).init()
nc = NSNotificationCenter.defaultCenter()
nc.addObserver_selector_name_object_(self, "textFontChanged:", "PyDETextFontChanged", None)
self.namespace = {}
self.canvas = graphics.Canvas()
self.context = graphics.Context(self.canvas, self.namespace)
self.animationTimer = None
self.__doc__ = {}
self._pageNumber = 1
self._frame = 150
self.fullScreen = None
self._seed = time.time()
self.currentView = self.graphicsView
return self
def autosavesInPlace(self):
return True
def close(self):
self.stopScript()
super(NodeBoxDocument, self).close()
def __del__(self):
nc = NSNotificationCenter.defaultCenter()
nc.removeObserver_name_object_(self, "PyDETextFontChanged", None)
# text view has a couple of circular refs, it can let go of them now
self.textView._cleanup()
def textFontChanged_(self, notification):
font = PyDETextView.getBasicTextAttributes()[NSFontAttributeName]
self.outputView.setFont_(font)
def readFromFile_ofType_(self, path, tp):
if self.textView is None:
# we're not yet fully loaded
self.path = path
else:
# "revert"
self.readFromUTF8(path)
return True
def writeToFile_ofType_(self, path, tp):
f = file(path, "w")
text = self.textView.string()
f.write(text.encode("utf8"))
f.close()
return True
def windowControllerDidLoadNib_(self, controller):
if self.path:
self.readFromUTF8(self.path)
font = PyDETextView.getBasicTextAttributes()[NSFontAttributeName]
self.outputView.setFont_(font)
self.textView.window().makeFirstResponder_(self.textView)
self.windowControllers()[0].setWindowFrameAutosaveName_("NodeBoxDocumentWindow")
def readFromUTF8(self, path):
f = file(path)
text = unicode(f.read(), "utf_8")
f.close()
self.textView.setString_(text)
self.textView.usesTabs = "\t" in text
def cleanRun(self, fn, newSeed = True, buildInterface=True):
self.animationSpinner.startAnimation_(None)
# Prepare everything for running the script
self.prepareRun()
# Run the actual script
success = self.fastRun(fn, newSeed)
self.animationSpinner.stopAnimation_(None)
if success and buildInterface:
# Build the interface
self.vars = self.namespace["_ctx"]._vars
if len(self.vars) > 0:
self.buildInterface_(None)
return success
def prepareRun(self):
# Compile the script
success, output = self._boxedRun(self._compileScript)
self._flushOutput(output)
if not success:
return False
# Initialize the namespace
self._initNamespace()
# Reset the pagenum
        self._pageNumber = 1
# Reset the frame
self._frame = 1
self.speed = self.canvas.speed = None
def fastRun(self, fn, newSeed = False):
# Check if there is code to run
if self._code is None:
return False
# Clear the canvas
self.canvas.clear()
# Generate a new seed, if needed
if newSeed:
self._seed = time.time()
random.seed(self._seed)
# Set the mouse position
window = self.currentView.window()
pt = window.mouseLocationOutsideOfEventStream()
mx, my = window.contentView().convertPoint_toView_(pt, self.currentView)
# Hack: mouse coordinates are flipped vertically in FullscreenView.
# This flips them back.
if isinstance(self.currentView, FullscreenView):
my = self.currentView.bounds()[1][1] - my
if self.fullScreen is None:
mx /= self.currentView.zoom
my /= self.currentView.zoom
self.namespace["MOUSEX"], self.namespace["MOUSEY"] = mx, my
self.namespace["mousedown"] = self.currentView.mousedown
self.namespace["keydown"] = self.currentView.keydown
self.namespace["key"] = self.currentView.key
self.namespace["keycode"] = self.currentView.keycode
self.namespace["scrollwheel"] = self.currentView.scrollwheel
self.namespace["wheeldelta"] = self.currentView.wheeldelta
# Reset the context
self.context._resetContext()
        # Initialize the magicvar
self.namespace[MAGICVAR] = self.magicvar
# Set the pagenum
self.namespace['PAGENUM'] = self._pageNumber
# Set the frame
self.namespace['FRAME'] = self._frame
# Run the script
success, output = self._boxedRun(fn)
self._flushOutput(output)
if not success:
return False
# Display the output of the script
self.currentView.setCanvas(self.canvas)
return True
@objc.IBAction
def runFullscreen_(self, sender):
if self.fullScreen is not None: return
self.stopScript()
self.currentView = FullscreenView.alloc().init()
self.currentView.canvas = None
fullRect = NSScreen.mainScreen().frame()
self.fullScreen = FullscreenWindow.alloc().initWithRect_(fullRect)
self.fullScreen.setContentView_(self.currentView)
self.fullScreen.makeKeyAndOrderFront_(self)
self.fullScreen.makeFirstResponder_(self.currentView)
NSMenu.setMenuBarVisible_(False)
NSCursor.hide()
self._runScript()
@objc.IBAction
def runScript_(self, sender):
self.runScript()
def runScript(self, compile=True, newSeed=True):
if self.fullScreen is not None: return
self.currentView = self.graphicsView
self._runScript(compile, newSeed)
def _runScript(self, compile=True, newSeed=True):
if not self.cleanRun(self._execScript):
pass
# Check whether we are dealing with animation
if self.canvas.speed is not None:
if not self.namespace.has_key("draw"):
errorAlert("Not a proper NodeBox animation",
"NodeBox animations should have at least a draw() method.")
return
# Check if animationTimer is already running
if self.animationTimer is not None:
self.stopScript()
self.speed = self.canvas.speed
# Run setup routine
if self.namespace.has_key("setup"):
self.fastRun(self.namespace["setup"])
window = self.currentView.window()
window.makeFirstResponder_(self.currentView)
# Start the timer
self.animationTimer = NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(
1.0 / self.speed, self, objc.selector(self.doFrame, signature="v@:@"), None, True)
# Start the spinner
self.animationSpinner.startAnimation_(None)
def runScriptFast(self):
if self.animationTimer is None:
self.fastRun(self._execScript)
else:
# XXX: This can be sped up. We just run _execScript to get the
# method with __MAGICVAR__ into the namespace, and execute
# that, so it should only be called once for animations.
self.fastRun(self._execScript)
self.fastRun(self.namespace["draw"])
def doFrame(self):
self.fastRun(self.namespace["draw"], newSeed=True)
self._frame += 1
def source(self):
return self.textView.string()
def setSource_(self, source):
self.textView.setString_(source)
@objc.IBAction
def stopScript_(self, sender=None):
self.stopScript()
def stopScript(self):
if self.namespace.has_key("stop"):
success, output = self._boxedRun(self.namespace["stop"])
self._flushOutput(output)
self.animationSpinner.stopAnimation_(None)
if self.animationTimer is not None:
self.animationTimer.invalidate()
self.animationTimer = None
if self.fullScreen is not None:
self.currentView = self.graphicsView
self.fullScreen = None
NSMenu.setMenuBarVisible_(True)
NSCursor.unhide()
self.textView.hideValueLadder()
window = self.textView.window()
window.makeFirstResponder_(self.textView)
def _compileScript(self, source=None):
if source is None:
source = self.textView.string()
self._code = None
self._code = compile(source + "\n\n", self.scriptName.encode('ascii', 'ignore'), "exec")
def _initNamespace(self):
self.namespace.clear()
# Add everything from the namespace
for name in graphics.__all__:
self.namespace[name] = getattr(graphics, name)
for name in util.__all__:
self.namespace[name] = getattr(util, name)
# Add everything from the context object
self.namespace["_ctx"] = self.context
for attrName in dir(self.context):
self.namespace[attrName] = getattr(self.context, attrName)
# Add the document global
self.namespace["__doc__"] = self.__doc__
# Add the page number
self.namespace["PAGENUM"] = self._pageNumber
# Add the frame number
self.namespace["FRAME"] = self._frame
# Add the magic var
self.namespace[MAGICVAR] = self.magicvar
# XXX: will be empty after reset.
#for var in self.vars:
# self.namespace[var.name] = var.value
def _execScript(self):
exec self._code in self.namespace
self.__doc__ = self.namespace.get("__doc__", self.__doc__)
def _boxedRun(self, method, args=[]):
"""
Runs the given method in a boxed environment.
Boxed environments:
- Have their current directory set to the directory of the file
- Have their argument set to the filename
- Have their outputs redirect to an output stream.
Returns:
A tuple containing:
- A boolean indicating whether the run was successful
- The OutputFile
"""
self.scriptName = self.fileName()
libDir = os.path.join(os.getenv("HOME"), "Library", "Application Support", "NodeBox")
if not self.scriptName:
curDir = os.getenv("HOME")
self.scriptName = "<untitled>"
else:
curDir = os.path.dirname(self.scriptName)
save = sys.stdout, sys.stderr
saveDir = os.getcwd()
saveArgv = sys.argv
sys.argv = [self.scriptName]
if os.path.exists(libDir):
sys.path.insert(0, libDir)
os.chdir(curDir)
sys.path.insert(0, curDir)
output = []
sys.stdout = OutputFile(output, False)
sys.stderr = OutputFile(output, True)
self._scriptDone = False
try:
if self.animationTimer is None:
pass
# Creating a thread is a heavy operation,
# don't install it when animating, where speed is crucial
#t = Thread(target=self._userCancelledMonitor, name="UserCancelledMonitor")
#t.start()
try:
method(*args)
except KeyboardInterrupt:
self.stopScript()
except:
etype, value, tb = sys.exc_info()
if tb.tb_next is not None:
tb = tb.tb_next # skip the frame doing the exec
traceback.print_exception(etype, value, tb)
etype = value = tb = None
return False, output
finally:
self._scriptDone = True
sys.stdout, sys.stderr = save
os.chdir(saveDir)
sys.path.remove(curDir)
try:
sys.path.remove(libDir)
except ValueError:
pass
sys.argv = saveArgv
#self._flushOutput()
return True, output
# from Mac/Tools/IDE/PyEdit.py
def _userCancelledMonitor(self):
import time
from signal import SIGINT
from Carbon import Evt
while not self._scriptDone:
if Evt.CheckEventQueueForUserCancel():
# Send a SIGINT signal to ourselves.
# This gets delivered to the main thread,
# cancelling the running script.
os.kill(os.getpid(), SIGINT)
break
time.sleep(0.25)
def _flushOutput(self, output):
outAttrs = PyDETextView.getBasicTextAttributes()
errAttrs = outAttrs.copy()
# XXX err color from user defaults...
errAttrs[NSForegroundColorAttributeName] = NSColor.redColor()
outputView = self.outputView
outputView.setSelectedRange_((outputView.textStorage().length(), 0))
lastErr = None
for isErr, data in output:
if isErr != lastErr:
attrs = [outAttrs, errAttrs][isErr]
outputView.setTypingAttributes_(attrs)
lastErr = isErr
outputView.insertText_(data)
# del self.output
@objc.IBAction
def copyImageAsPDF_(self, sender):
pboard = NSPasteboard.generalPasteboard()
# graphicsView implements the pboard delegate method to provide the data
pboard.declareTypes_owner_([NSPDFPboardType,NSPostScriptPboardType,NSTIFFPboardType], self.graphicsView)
@objc.IBAction
def exportAsImage_(self, sender):
exportPanel = NSSavePanel.savePanel()
exportPanel.setRequiredFileType_("pdf")
exportPanel.setNameFieldLabel_("Export To:")
exportPanel.setPrompt_("Export")
exportPanel.setCanSelectHiddenExtension_(True)
if not NSBundle.loadNibNamed_owner_("ExportImageAccessory", self):
NSLog("Error -- could not load ExportImageAccessory.")
self.exportImagePageCount.setIntValue_(1)
exportPanel.setAccessoryView_(self.exportImageAccessory)
path = self.fileName()
if path:
dirName, fileName = os.path.split(path)
fileName, ext = os.path.splitext(fileName)
fileName += ".pdf"
else:
dirName, fileName = None, "Untitled.pdf"
# If a file was already exported, use that folder as the default.
if self.exportDir is not None:
dirName = self.exportDir
exportPanel.beginSheetForDirectory_file_modalForWindow_modalDelegate_didEndSelector_contextInfo_(
dirName, fileName, NSApp().mainWindow(), self,
"exportPanelDidEnd:returnCode:contextInfo:", 0)
def exportPanelDidEnd_returnCode_contextInfo_(self, panel, returnCode, context):
if returnCode:
fname = panel.filename()
self.exportDir = os.path.split(fname)[0] # Save the directory we exported to.
pages = self.exportImagePageCount.intValue()
format = panel.requiredFileType()
panel.close()
self.doExportAsImage(fname, format, pages)
exportPanelDidEnd_returnCode_contextInfo_ = objc.selector(exportPanelDidEnd_returnCode_contextInfo_,
signature="v@:@ii")
@objc.IBAction
def exportImageFormatChanged_(self, sender):
image_formats = ('pdf', 'eps', 'png', 'tiff', 'jpg', 'gif')
panel = sender.window()
panel.setRequiredFileType_(image_formats[sender.indexOfSelectedItem()])
def doExportAsImage(self, fname, format, pages=1):
basename, ext = os.path.splitext(fname)
# When saving one page (the default), just save the current graphics
# context. When generating multiple pages, we run the script again
# (so we don't use the current displayed view) for the first page,
# and then for every next page.
if pages == 1:
if self.graphicsView.canvas is None:
self.runScript()
self.canvas.save(fname, format)
elif pages > 1:
pb = ProgressBarController.alloc().init()
pb.begin("Generating %s images..." % pages, pages)
try:
if not self.cleanRun(self._execScript): return
self._pageNumber = 1
self._frame = 1
# If the speed is set, we are dealing with animation
if self.canvas.speed is None:
for i in range(pages):
if i > 0: # Run has already happened first time
self.fastRun(self._execScript, newSeed=True)
counterAsString = "-%5d" % self._pageNumber
counterAsString = counterAsString.replace(' ', '0')
exportName = basename + counterAsString + ext
self.canvas.save(exportName, format)
self.graphicsView.setNeedsDisplay_(True)
self._pageNumber += 1
self._frame += 1
pb.inc()
else:
if self.namespace.has_key("setup"):
self.fastRun(self.namespace["setup"])
for i in range(pages):
self.fastRun(self.namespace["draw"], newSeed=True)
counterAsString = "-%5d" % self._pageNumber
counterAsString = counterAsString.replace(' ', '0')
exportName = basename + counterAsString + ext
self.canvas.save(exportName, format)
self.graphicsView.setNeedsDisplay_(True)
self._pageNumber += 1
self._frame += 1
pb.inc()
if self.namespace.has_key("stop"):
success, output = self._boxedRun(self.namespace["stop"])
self._flushOutput(output)
except KeyboardInterrupt:
pass
pb.end()
del pb
self._pageNumber = 1
self._frame = 1
@objc.IBAction
def exportAsMovie_(self, sender):
exportPanel = NSSavePanel.savePanel()
exportPanel.setRequiredFileType_("pdf")
exportPanel.setNameFieldLabel_("Export To:")
exportPanel.setPrompt_("Export")
exportPanel.setCanSelectHiddenExtension_(True)
exportPanel.setAllowedFileTypes_(["mov"])
if not NSBundle.loadNibNamed_owner_("ExportMovieAccessory", self):
NSLog("Error -- could not load ExportMovieAccessory.")
self.exportMovieFrames.setIntValue_(150)
self.exportMovieFps.setIntValue_(30)
exportPanel.setAccessoryView_(self.exportMovieAccessory)
path = self.fileName()
if path:
dirName, fileName = os.path.split(path)
fileName, ext = os.path.splitext(fileName)
fileName += ".mov"
else:
dirName, fileName = None, "Untitled.mov"
# If a file was already exported, use that folder as the default.
if self.exportDir is not None:
dirName = self.exportDir
exportPanel.beginSheetForDirectory_file_modalForWindow_modalDelegate_didEndSelector_contextInfo_(
dirName, fileName, NSApp().mainWindow(), self,
"qtPanelDidEnd:returnCode:contextInfo:", 0)
def qtPanelDidEnd_returnCode_contextInfo_(self, panel, returnCode, context):
if returnCode:
fname = panel.filename()
self.exportDir = os.path.split(fname)[0] # Save the directory we exported to.
frames = self.exportMovieFrames.intValue()
fps = self.exportMovieFps.floatValue()
panel.close()
if frames <= 0 or fps <= 0: return
self.doExportAsMovie(fname, frames, fps)
qtPanelDidEnd_returnCode_contextInfo_ = objc.selector(qtPanelDidEnd_returnCode_contextInfo_,
signature="v@:@ii")
def doExportAsMovie(self, fname, frames=60, fps=30):
# Only load QTSupport when necessary.
# QTSupport loads QTKit, which wants to establish a connection to the window server.
# If we load QTSupport before something is on screen, the connection to the window server
# cannot be established.
from nodebox.util import QTSupport
try:
os.unlink(fname)
except:
pass
try:
fp = open(fname, 'w')
fp.close()
except:
errorAlert("File Error", "Could not create file '%s'. Perhaps it is locked or busy." % fname)
return
movie = None
pb = ProgressBarController.alloc().init()
pb.begin("Generating %s frames..." % frames, frames)
try:
if not self.cleanRun(self._execScript): return
self._pageNumber = 1
self._frame = 1
movie = QTSupport.Movie(fname, fps)
# If the speed is set, we are dealing with animation
if self.canvas.speed is None:
for i in range(frames):
if i > 0: # Run has already happened first time
self.fastRun(self._execScript, newSeed=True)
movie.add(self.canvas)
self.graphicsView.setNeedsDisplay_(True)
pb.inc()
self._pageNumber += 1
self._frame += 1
else:
if self.namespace.has_key("setup"):
self.fastRun(self.namespace["setup"])
for i in range(frames):
self.fastRun(self.namespace["draw"], newSeed=True)
movie.add(self.canvas)
self.graphicsView.setNeedsDisplay_(True)
pb.inc()
self._pageNumber += 1
self._frame += 1
if self.namespace.has_key("stop"):
success, output = self._boxedRun(self.namespace["stop"])
self._flushOutput(output)
except KeyboardInterrupt:
pass
pb.end()
del pb
movie.save()
self._pageNumber = 1
self._frame = 1
@objc.IBAction
def printDocument_(self, sender):
op = NSPrintOperation.printOperationWithView_printInfo_(self.graphicsView, self.printInfo())
op.runOperationModalForWindow_delegate_didRunSelector_contextInfo_(
NSApp().mainWindow(), self, "printOperationDidRun:success:contextInfo:",
0)
def printOperationDidRun_success_contextInfo_(self, op, success, info):
if success:
self.setPrintInfo_(op.printInfo())
printOperationDidRun_success_contextInfo_ = objc.selector(printOperationDidRun_success_contextInfo_,
signature="v@:@ci")
@objc.IBAction
def buildInterface_(self, sender):
self.dashboardController.buildInterface(self.vars)
def validateMenuItem_(self, menuItem):
if menuItem.action() in ("exportAsImage:", "exportAsMovie:"):
return self.canvas is not None
return True
# Zoom commands, forwarding to the graphics view.
@objc.IBAction
def zoomIn_(self, sender):
if self.fullScreen is not None: return
self.graphicsView.zoomIn_(sender)
@objc.IBAction
def zoomOut_(self, sender):
if self.fullScreen is not None: return
self.graphicsView.zoomOut_(sender)
@objc.IBAction
def zoomToTag_(self, sender):
if self.fullScreen is not None: return
self.graphicsView.zoomTo_(sender.tag() / 100.0)
@objc.IBAction
def zoomToFit_(self, sender):
if self.fullScreen is not None: return
self.graphicsView.zoomToFit_(sender)
class FullscreenWindow(NSWindow):
def initWithRect_(self, fullRect):
super(FullscreenWindow, self).initWithContentRect_styleMask_backing_defer_(fullRect, NSBorderlessWindowMask, NSBackingStoreBuffered, True)
return self
def canBecomeKeyWindow(self):
return True
class FullscreenView(NSView):
def init(self):
super(FullscreenView, self).init()
self.mousedown = False
self.keydown = False
self.key = None
self.keycode = None
self.scrollwheel = False
self.wheeldelta = 0.0
return self
def setCanvas(self, canvas):
self.canvas = canvas
self.setNeedsDisplay_(True)
if not hasattr(self, "screenRect"):
self.screenRect = NSScreen.mainScreen().frame()
cw, ch = self.canvas.size
sw, sh = self.screenRect[1]
self.scalingFactor = calc_scaling_factor(cw, ch, sw, sh)
nw, nh = cw * self.scalingFactor, ch * self.scalingFactor
self.scaledSize = nw, nh
self.dx = (sw - nw) / 2.0
self.dy = (sh - nh) / 2.0
def drawRect_(self, rect):
NSGraphicsContext.currentContext().saveGraphicsState()
NSColor.blackColor().set()
NSRectFill(rect)
if self.canvas is not None:
t = NSAffineTransform.transform()
t.translateXBy_yBy_(self.dx, self.dy)
t.scaleBy_(self.scalingFactor)
t.concat()
clip = NSBezierPath.bezierPathWithRect_( ((0, 0), (self.canvas.width, self.canvas.height)) )
clip.addClip()
self.canvas.draw()
NSGraphicsContext.currentContext().restoreGraphicsState()
def isFlipped(self):
return True
def mouseDown_(self, event):
self.mousedown = True
def mouseUp_(self, event):
self.mousedown = False
def keyDown_(self, event):
self.keydown = True
self.key = event.characters()
self.keycode = event.keyCode()
def keyUp_(self, event):
self.keydown = False
self.key = event.characters()
self.keycode = event.keyCode()
def scrollWheel_(self, event):
self.scrollwheel = True
self.wheeldelta = event.deltaY()
def canBecomeKeyView(self):
return True
def acceptsFirstResponder(self):
return True
def calc_scaling_factor(width, height, maxwidth, maxheight):
return min(float(maxwidth) / width, float(maxheight) / height)
class ZoomPanel(NSView):
pass
# class defined in NodeBoxGraphicsView.xib
class NodeBoxGraphicsView(NSView):
document = objc.IBOutlet()
zoomLevel = objc.IBOutlet()
zoomField = objc.IBOutlet()
zoomSlider = objc.IBOutlet()
# The zoom levels are 10%, 25%, 50%, 75%, 100%, 200% and so on up to 2000%.
zoomLevels = [0.1, 0.25, 0.5, 0.75]
zoom = 1.0
while zoom <= 20.0:
zoomLevels.append(zoom)
zoom += 1.0
def awakeFromNib(self):
self.canvas = None
self._dirty = False
self.mousedown = False
self.keydown = False
self.key = None
self.keycode = None
self.scrollwheel = False
self.wheeldelta = 0.0
self._zoom = 1.0
self.setFrameSize_( (graphics.DEFAULT_WIDTH, graphics.DEFAULT_HEIGHT) )
self.setFocusRingType_(NSFocusRingTypeExterior)
if self.superview() is not None:
self.superview().setBackgroundColor_(VERY_LIGHT_GRAY)
def setCanvas(self, canvas):
self.canvas = canvas
if canvas is not None:
w, h = self.canvas.size
self.setFrameSize_([w*self._zoom, h*self._zoom])
self.markDirty()
def _get_zoom(self):
return self._zoom
def _set_zoom(self, zoom):
self._zoom = zoom
self.zoomLevel.setTitle_("%i%%" % (self._zoom * 100.0))
self.zoomSlider.setFloatValue_(self._zoom * 100.0)
self.setCanvas(self.canvas)
zoom = property(_get_zoom, _set_zoom)
@objc.IBAction
def dragZoom_(self, sender):
self.zoom = self.zoomSlider.floatValue() / 100.0
self.setCanvas(self.canvas)
def findNearestZoomIndex(self, zoom):
"""Returns the nearest zoom level, and whether we found a direct, exact
match or a fuzzy match."""
try: # Search for a direct hit first.
idx = self.zoomLevels.index(zoom)
return idx, True
except ValueError: # Can't find the zoom level, try looking at the indexes.
idx = 0
try:
while self.zoomLevels[idx] < zoom:
idx += 1
            except IndexError: # End of the list
idx = len(self.zoomLevels) - 1 # Just return the last index.
return idx, False
@objc.IBAction
def zoomIn_(self, sender):
idx, direct = self.findNearestZoomIndex(self.zoom)
# Direct hits are perfect, but indirect hits require a bit of help.
# Because of the way indirect hits are calculated, they are already
# rounded up to the upper zoom level; this means we don't need to add 1.
if direct:
idx += 1
idx = max(min(idx, len(self.zoomLevels)-1), 0)
self.zoom = self.zoomLevels[idx]
@objc.IBAction
def zoomOut_(self, sender):
idx, direct = self.findNearestZoomIndex(self.zoom)
idx -= 1
idx = max(min(idx, len(self.zoomLevels)-1), 0)
self.zoom = self.zoomLevels[idx]
@objc.IBAction
def resetZoom_(self, sender):
self.zoom = 1.0
def zoomTo_(self, zoom):
self.zoom = zoom
@objc.IBAction
def zoomToFit_(self, sender):
w, h = self.canvas.size
fw, fh = self.superview().frame()[1]
factor = min(fw / w, fh / h)
self.zoom = factor
def markDirty(self, redraw=True):
self._dirty = True
if redraw:
self.setNeedsDisplay_(True)
def setFrameSize_(self, size):
self._image = None
NSView.setFrameSize_(self, size)
def isOpaque(self):
return False
def isFlipped(self):
return True
def drawRect_(self, rect):
if self.canvas is not None:
NSGraphicsContext.currentContext().saveGraphicsState()
try:
if self.zoom != 1.0:
t = NSAffineTransform.transform()
t.scaleBy_(self.zoom)
t.concat()
clip = NSBezierPath.bezierPathWithRect_( ((0, 0), (self.canvas.width, self.canvas.height)) )
clip.addClip()
self.canvas.draw()
except:
# A lot of code just to display the error in the output view.
etype, value, tb = sys.exc_info()
if tb.tb_next is not None:
tb = tb.tb_next # skip the frame doing the exec
traceback.print_exception(etype, value, tb)
data = "".join(traceback.format_exception(etype, value, tb))
attrs = PyDETextView.getBasicTextAttributes()
attrs[NSForegroundColorAttributeName] = NSColor.redColor()
outputView = self.document.outputView
outputView.setSelectedRange_((outputView.textStorage().length(), 0))
outputView.setTypingAttributes_(attrs)
outputView.insertText_(data)
NSGraphicsContext.currentContext().restoreGraphicsState()
def _updateImage(self):
if self._dirty:
self._image = self.canvas._nsImage
self._dirty = False
# pasteboard delegate method
def pasteboard_provideDataForType_(self, pboard, type):
        if type == NSPDFPboardType:
            pboard.setData_forType_(self.pdfData, NSPDFPboardType)
        elif type == NSPostScriptPboardType:
            pboard.setData_forType_(self.epsData, NSPostScriptPboardType)
        elif type == NSTIFFPboardType:
            pboard.setData_forType_(self.tiffData, NSTIFFPboardType)
def _get_pdfData(self):
if self.canvas:
return self.canvas._getImageData('pdf')
pdfData = property(_get_pdfData)
def _get_epsData(self):
if self.canvas:
return self.canvas._getImageData('eps')
epsData = property(_get_epsData)
def _get_tiffData(self):
return self.canvas._getImageData('tiff')
tiffData = property(_get_tiffData)
def _get_pngData(self):
return self.canvas._getImageData('png')
pngData = property(_get_pngData)
def _get_gifData(self):
return self.canvas._getImageData('gif')
gifData = property(_get_gifData)
def _get_jpegData(self):
return self.canvas._getImageData('jpeg')
jpegData = property(_get_jpegData)
def mouseDown_(self, event):
self.mousedown = True
def mouseUp_(self, event):
self.mousedown = False
def keyDown_(self, event):
self.keydown = True
self.key = event.characters()
self.keycode = event.keyCode()
def keyUp_(self, event):
self.keydown = False
self.key = event.characters()
self.keycode = event.keyCode()
def scrollWheel_(self, event):
NSResponder.scrollWheel_(self, event)
self.scrollwheel = True
self.wheeldelta = event.deltaY()
def canBecomeKeyView(self):
return True
def acceptsFirstResponder(self):
return True
class NodeBoxAppDelegate(NSObject):
def awakeFromNib(self):
self._prefsController = None
libDir = os.path.join(os.getenv("HOME"), "Library", "Application Support", "NodeBox")
try:
if not os.path.exists(libDir):
os.mkdir(libDir)
f = open(os.path.join(libDir, "README"), "w")
f.write("In this directory, you can put Python libraries to make them available to your scripts.\n")
f.close()
except OSError: pass
except IOError: pass
@objc.IBAction
def showPreferencesPanel_(self, sender):
if self._prefsController is None:
from nodebox.gui.mac.preferences import NodeBoxPreferencesController
self._prefsController = NodeBoxPreferencesController.alloc().init()
self._prefsController.showWindow_(sender)
@objc.IBAction
def generateCode_(self, sender):
"""Generate a piece of NodeBox code using OttoBot"""
from nodebox.util.ottobot import genProgram
controller = NSDocumentController.sharedDocumentController()
doc = controller.newDocument_(sender)
doc = controller.currentDocument()
doc.textView.setString_(genProgram())
doc.runScript()
@objc.IBAction
def showHelp_(self, sender):
url = NSURL.URLWithString_("http://nodebox.net/code/index.php/Reference")
NSWorkspace.sharedWorkspace().openURL_(url)
@objc.IBAction
def showSite_(self, sender):
url = NSURL.URLWithString_("http://nodebox.net/")
NSWorkspace.sharedWorkspace().openURL_(url)
def applicationWillTerminate_(self, note):
import atexit
atexit._run_exitfuncs()
| 36.44112 | 146 | 0.606203 |
7946b709983160600294fd21ecd433e851450ad5 | 5,998 | py | Python | HyperAPI/hdp_api/routes/datasetReshapes.py | WassimAbida/HyperAPI | 654a72922148d23158e3e4ea4b105f07626c5c36 | [
"BSD-3-Clause"
] | null | null | null | HyperAPI/hdp_api/routes/datasetReshapes.py | WassimAbida/HyperAPI | 654a72922148d23158e3e4ea4b105f07626c5c36 | [
"BSD-3-Clause"
] | null | null | null | HyperAPI/hdp_api/routes/datasetReshapes.py | WassimAbida/HyperAPI | 654a72922148d23158e3e4ea4b105f07626c5c36 | [
"BSD-3-Clause"
] | null | null | null | from HyperAPI.hdp_api.base.resource import Resource
from HyperAPI.hdp_api.base.route import Route
class DatasetReshapes(Resource):
name = "datasetReshapes"
available_since = "3.0"
removed_since = None
class _getReshapes(Route):
name = "getReshapes"
httpMethod = Route.GET
path = "/projects/{project_ID}/reshapes"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
}
class _createReshape(Route):
name = "createReshape"
httpMethod = Route.POST
path = "/projects/{project_ID}/reshapes"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
}
class _getReshape(Route):
name = "getReshape"
httpMethod = Route.GET
path = "/projects/{project_ID}/reshapes/{reshape_ID}"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'reshape_ID': Route.VALIDATOR_OBJECTID,
}
class _updateReshape(Route):
name = "updateReshape"
httpMethod = Route.POST
path = "/projects/{project_ID}/reshapes/{reshape_ID}"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'reshape_ID': Route.VALIDATOR_OBJECTID,
}
class _deleteReshape(Route):
name = "deleteReshape"
httpMethod = Route.POST
path = "/projects/{project_ID}/reshapes/{reshape_ID}/delete"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'reshape_ID': Route.VALIDATOR_OBJECTID,
}
class _inheritReshape(Route):
name = "inheritReshape"
httpMethod = Route.POST
path = "/projects/{project_ID}/reshapes/{reshape_ID}/inherit"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'reshape_ID': Route.VALIDATOR_OBJECTID,
}
class _publishReshapeToEtl(Route):
name = "publishReshapeToEtl"
available_since = '3.3'
httpMethod = Route.POST
path = "/projects/{project_ID}/reshapes/{reshape_ID}/etlpublish"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'reshape_ID': Route.VALIDATOR_OBJECTID,
}
class _getReshapeGroups(Route):
name = "getReshapeGroups"
httpMethod = Route.GET
path = "/projects/{project_ID}/reshapes/{reshape_ID}/groups"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'reshape_ID': Route.VALIDATOR_OBJECTID,
}
class _getReshapesGroups(Route):
name = "getReshapesGroups"
httpMethod = Route.GET
path = "/projects/{project_ID}/reshapes/groups"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
}
class _createReshapeGroup(Route):
name = "createReshapeGroup"
httpMethod = Route.POST
path = "/projects/{project_ID}/reshapes/groups"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
}
class _getReshapeGroup(Route):
name = "getReshapeGroup"
httpMethod = Route.GET
path = "/projects/{project_ID}/reshapes/groups/{group_ID}"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'group_ID': Route.VALIDATOR_OBJECTID,
}
class _updateReshapeGroup(Route):
name = "updateReshapeGroup"
httpMethod = Route.POST
path = "/projects/{project_ID}/reshapes/groups/{group_ID}"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'group_ID': Route.VALIDATOR_OBJECTID,
}
class _deleteReshapeGroup(Route):
name = "deleteReshapeGroup"
httpMethod = Route.POST
path = "/projects/{project_ID}/reshapes/groups/{group_ID}/delete"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'group_ID': Route.VALIDATOR_OBJECTID,
}
class _describeReshape(Route):
name = "describeReshape"
httpMethod = Route.POST
path = "/projects/{project_ID}/datasets/{dataset_ID}/reshapes/describe"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID,
}
class _getReshapeDescription(Route):
name = "getReshapeDescription"
httpMethod = Route.GET
path = "/projects/{project_ID}/datasets/{dataset_ID}/reshapes/describe"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID,
}
class _exportSteps(Route):
name = "exportsteps"
httpMethod = Route.GET
path = "/projects/{project_ID}/datasets/{dataset_ID}/reshapes/exportsteps"
available_since = "3.6.1"
unavailable_on = ["4.0", "4.2"]
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID,
}
class _applyReshape(Route):
name = "applyReshape"
httpMethod = Route.POST
path = "/projects/{project_ID}/datasets/{dataset_ID}/reshapes/{reshape_ID}/apply"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID,
'reshape_ID': Route.VALIDATOR_OBJECTID,
}
class _removeReshape(Route):
name = "removeReshape"
httpMethod = Route.POST
path = "/projects/{project_ID}/datasets/{dataset_ID}/reshapes/remove"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID,
}
class _proposeAlterations(Route):
name = "proposeAlterations"
httpMethod = Route.POST
path = "/projects/{project_ID}/datasets/{dataset_ID}/reshapes/proposeAlterations"
_path_keys = {
'project_ID': Route.VALIDATOR_OBJECTID,
'dataset_ID': Route.VALIDATOR_OBJECTID,
}
| 33.322222 | 89 | 0.610037 |
7946b76c76898d73d3d83f723e8c9d9510318b69 | 2,280 | py | Python | base_agent/dialogue_stack.py | kandluis/droidlet | 3851f0bdac7bc63100cfbcf1c206a94658790352 | [
"MIT"
] | null | null | null | base_agent/dialogue_stack.py | kandluis/droidlet | 3851f0bdac7bc63100cfbcf1c206a94658790352 | [
"MIT"
] | null | null | null | base_agent/dialogue_stack.py | kandluis/droidlet | 3851f0bdac7bc63100cfbcf1c206a94658790352 | [
"MIT"
] | null | null | null | """
Copyright (c) Facebook, Inc. and its affiliates.
"""
import logging
from base_agent.base_util import NextDialogueStep, ErrorWithResponse
class DialogueStack(object):
"""This class organizes and steps DialogueObjects."""
def __init__(self, agent, memory):
self.agent = agent
self.memory = memory
self.stack = []
def __getitem__(self, i):
"""Get the ith item on the stack """
return self.stack[i]
def peek(self):
"""Get the item on top of the DialogueStack"""
if self.stack:
return self.stack[-1]
else:
return None
def clear(self):
"""clear current stack"""
self.old_stack = self.stack
self.stack = []
def append(self, dialogue_object):
"""Append a dialogue_object to stack"""
self.stack.append(dialogue_object)
def append_new(self, cls, *args, **kwargs):
"""Construct a new DialogueObject and append to stack"""
self.stack.append(
cls(agent=self.agent, memory=self.memory, dialogue_stack=self, *args, **kwargs)
)
def step(self):
"""Process and step through the top-of-stack dialogue object."""
if len(self.stack) > 0:
# WARNING: check_finished increments the DialogueObject's current_step counter
while len(self.stack) > 0 and self.stack[-1].check_finished():
del self.stack[-1]
if len(self.stack) == 0:
return
try:
output_chat, step_data = self.stack[-1].step()
if output_chat:
self.agent.send_chat(output_chat)
# Update progeny_data of the current DialogueObject
if len(self.stack) > 1 and step_data is not None:
logging.debug("Update progeny_data={} stack={}".format(step_data, self.stack))
self.stack[-2].update_progeny_data(step_data)
except NextDialogueStep:
return
except ErrorWithResponse as err:
self.stack[-1].finished = True
self.agent.send_chat(err.chat)
return
def __len__(self):
"""Length of stack"""
return len(self.stack)
| 31.232877 | 98 | 0.575439 |
7946b7a6ce73732132cf4d923c9e8538cbf92610 | 2,913 | py | Python | prophy/tests/test_class_helpers.py | florczakraf/prophy | a42a6151a77b31afa05300fc2e1f52cc15a298cf | [
"MIT"
] | 14 | 2015-02-19T22:00:37.000Z | 2020-11-30T03:03:55.000Z | prophy/tests/test_class_helpers.py | florczakraf/prophy | a42a6151a77b31afa05300fc2e1f52cc15a298cf | [
"MIT"
] | 31 | 2015-06-22T11:11:10.000Z | 2021-05-12T06:35:47.000Z | prophy/tests/test_class_helpers.py | florczakraf/prophy | a42a6151a77b31afa05300fc2e1f52cc15a298cf | [
"MIT"
] | 16 | 2015-06-12T06:48:06.000Z | 2019-11-26T22:48:13.000Z | import prophy
def get_helper_classes():
class SubStruct(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [
("a", prophy.array(prophy.u8, size=2)),
]
class OneStruct(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [
("x", prophy.u8),
("y", prophy.u32),
("o", prophy.optional(prophy.u32)),
]
class SameStruct(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [
("x", prophy.u8),
("y", prophy.u32),
("o", prophy.optional(prophy.u32)),
]
class DifferentStruct(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [
("x", prophy.u8),
("y", prophy.array(prophy.u32, size=1)),
("o", prophy.optional(prophy.u32)),
]
class OneEnum(prophy.with_metaclass(prophy.enum_generator, prophy.enum)):
_enumerators = [
("x", 1),
("y", 2),
]
class SameEnum(prophy.with_metaclass(prophy.enum_generator, prophy.enum)):
_enumerators = [
("x", 1),
("y", 2),
]
class DifferentEnum(prophy.with_metaclass(prophy.enum_generator, prophy.enum)):
_enumerators = [
("x", 1),
("y", 3),
]
class OneUnion(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [
("x", prophy.u8, 1),
("s", OneStruct, 2),
]
class SameUnion(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [
("x", prophy.u8, 1),
("s", OneStruct, 2),
]
class DifferentUnion(prophy.with_metaclass(prophy.union_generator, prophy.union)):
_descriptor = [
("x", prophy.u8, 1),
("s", OneStruct, 3),
]
return SubStruct, OneStruct, SameStruct, DifferentStruct, OneEnum, SameEnum, DifferentEnum, OneUnion, SameUnion, \
DifferentUnion
def test_prophy_structs_equality():
SubStruct, OneStruct, SameStruct, DifferentStruct, OneEnum, SameEnum, DifferentEnum, OneUnion, SameUnion, \
DifferentUnion = get_helper_classes()
check_eq(
OneStruct, SameStruct, DifferentStruct, OneEnum, OneUnion, float, object
)
check_eq(
OneEnum, SameEnum, DifferentEnum, OneStruct, OneUnion, float, object
)
check_eq(
OneUnion, SameUnion, DifferentUnion, OneStruct, OneEnum, float, object
)
def check_eq(one, same, *different_ones):
assert one == same
assert not one != same
assert same == one
assert not same != one
for different in different_ones:
assert one != different
assert not one == different
assert different != one
assert not different == one
| 30.34375 | 118 | 0.58977 |
7946b7df3c3eedcc089af29067ab4ae7c07477d8 | 3,290 | py | Python | sensor_ui/rotary_encoder.py | mattgrogan/ledmatrix | 3a54de98ab107cf1266404400c7eb576007c8b17 | [
"MIT"
] | 1 | 2017-10-27T20:27:13.000Z | 2017-10-27T20:27:13.000Z | sensor_ui/rotary_encoder.py | mattgrogan/ledmatrix | 3a54de98ab107cf1266404400c7eb576007c8b17 | [
"MIT"
] | null | null | null | sensor_ui/rotary_encoder.py | mattgrogan/ledmatrix | 3a54de98ab107cf1266404400c7eb576007c8b17 | [
"MIT"
] | null | null | null | from gpiozero import Button
class RotaryEncoder:
"""
Decode mechanical rotary encoder pulses.
    The following example prints the direction of each rotary encoder change:
from gpiozero import RotaryEncoder
def change(value):
if value == 1:
print("clockwise")
        else:  # i.e. value == -1
print("counterclockwise")
rotary = RotaryEncoder(2, 3)
rotary.when_rotated = change
    Based on http://abyz.co.uk/rpi/pigpio/examples.html#Python_rotary_encoder_py
"""
gpioA = None
gpioB = None
levA = 0
levB = 0
lastGpio = None
when_rotated = lambda *args : None
def __init__(self, pinA, pinB, pull_up=False):
"""
        Used to detect rotary encoder changes (set the when_rotated attribute).
        The callback takes one parameter: +1 for clockwise and -1 for counterclockwise.
:param pinA int :
:param pinB int :
:pull_up bool :
            Whether the common contact is NOT connected to ground.
"""
self.gpioA = Button(pinA, pull_up)
self.gpioB = Button(pinB, pull_up)
self.levA = 0
self.levB = 0
self.lastGpio = None
self.gpioA.when_pressed = lambda *args : self.pulse(self.gpioA, 1)
self.gpioA.when_released = lambda *args : self.pulse(self.gpioA, 0)
self.gpioB.when_pressed = lambda *args : self.pulse(self.gpioB, 1)
self.gpioB.when_released = lambda *args : self.pulse(self.gpioB, 0)
def pulse(self, gpio, level):
"""
Decode the rotary encoder pulse.
+---------+ +---------+ 0
| | | |
A | | | |
| | | |
+---------+ +---------+ +----- 1
+---------+ +---------+ 0
| | | |
B | | | |
| | | |
----+ +---------+ +---------+ 1
"""
if gpio == self.gpioA:
self.levA = level
else:
self.levB = level
if gpio != self.lastGpio:
self.lastGpio = gpio
if gpio == self.gpioA and level == 1:
if self.levB == 1:
self.when_rotated(1)
elif gpio == self.gpioB and level == 1:
if self.levA == 1:
self.when_rotated(-1)
else:
if self.levB == 1:
self.when_rotated(-1)
elif self.levA == 1:
self.when_rotated(1)
else:
if gpio == self.gpioA and level == 1:
if self.levB == 1:
self.when_rotated(1)
elif gpio == self.gpioB and level == 1:
if self.levA == 1:
self.when_rotated(-1)
else:
if self.levB == 1:
self.when_rotated(-1)
elif self.levA == 1:
self.when_rotated(1)
| 31.941748 | 86 | 0.427052 |
7946ba0d437f7d739eac3e2833ad3efb1acfe051 | 18,623 | py | Python | tests/io/test_json.py | Hhx1999/eznlp | 9d1397d8e9630c099295712cbcffa495353a3268 | [
"Apache-2.0"
] | 1 | 2022-03-08T05:17:52.000Z | 2022-03-08T05:17:52.000Z | tests/io/test_json.py | Hhx1999/eznlp | 9d1397d8e9630c099295712cbcffa495353a3268 | [
"Apache-2.0"
] | null | null | null | tests/io/test_json.py | Hhx1999/eznlp | 9d1397d8e9630c099295712cbcffa495353a3268 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
import jieba
from eznlp.io import JsonIO, SQuADIO, KarpathyIO, TextClsIO, BratIO
from eznlp.utils.chunk import detect_nested, filter_clashed_by_priority
class TestJsonIO(object):
"""
References
----------
[1] Lu and Roth. 2015. Joint Mention Extraction and Classification with Mention Hypergraphs. EMNLP 2015.
[2] Eberts and Ulges. 2019. Span-based joint entity and relation extraction with Transformer pre-training. ECAI 2020.
[3] Luan et al. 2019. A general framework for information extraction using dynamic span graphs. NAACL 2019.
[4] Zhong and Chen. 2020. A frustratingly easy approach for joint entity and relation extraction. NAACL 2020.
"""
def test_ace2004(self):
io = JsonIO(text_key='tokens', chunk_key='entities', chunk_type_key='type', chunk_start_key='start', chunk_end_key='end')
train_data = io.read("data/ace-lu2015emnlp/ACE2004/train.json")
dev_data = io.read("data/ace-lu2015emnlp/ACE2004/dev.json")
test_data = io.read("data/ace-lu2015emnlp/ACE2004/test.json")
assert len(train_data) == 6_799
assert sum(len(ex['chunks']) for ex in train_data) == 22_207
assert max(ck[2]-ck[1] for ex in train_data for ck in ex['chunks']) == 57
assert len(dev_data) == 829
assert sum(len(ex['chunks']) for ex in dev_data) == 2_511
assert max(ck[2]-ck[1] for ex in dev_data for ck in ex['chunks']) == 35
assert len(test_data) == 879
assert sum(len(ex['chunks']) for ex in test_data) == 3_031
assert max(ck[2]-ck[1] for ex in test_data for ck in ex['chunks']) == 43
def test_ace2005(self):
io = JsonIO(text_key='tokens', chunk_key='entities', chunk_type_key='type', chunk_start_key='start', chunk_end_key='end')
train_data = io.read("data/ace-lu2015emnlp/ACE2005/train.json")
dev_data = io.read("data/ace-lu2015emnlp/ACE2005/dev.json")
test_data = io.read("data/ace-lu2015emnlp/ACE2005/test.json")
assert len(train_data) == 7_336
assert sum(len(ex['chunks']) for ex in train_data) == 24_687
assert max(ck[2]-ck[1] for ex in train_data for ck in ex['chunks']) == 49
assert len(dev_data) == 958
assert sum(len(ex['chunks']) for ex in dev_data) == 3_217
assert max(ck[2]-ck[1] for ex in dev_data for ck in ex['chunks']) == 30
assert len(test_data) == 1_047
assert sum(len(ex['chunks']) for ex in test_data) == 3_027
assert max(ck[2]-ck[1] for ex in test_data for ck in ex['chunks']) == 27
def test_genia(self):
io = JsonIO(text_key='tokens', chunk_key='entities', chunk_type_key='type', chunk_start_key='start', chunk_end_key='end')
train_data = io.read("data/genia/term.train.json")
test_data = io.read("data/genia/term.test.json")
assert len(train_data) == 16_528
assert sum(len(ex['chunks']) for ex in train_data) == 50_133
assert len(test_data) == 1_836
assert sum(len(ex['chunks']) for ex in test_data) == 5_466
def test_ace2004_rel(self):
io = JsonIO(relation_key='relations', relation_type_key='type', relation_head_key='head', relation_tail_key='tail')
train_data = io.read("data/ace-luan2019naacl/ace04/cv0.train.json")
test_data = io.read("data/ace-luan2019naacl/ace04/cv0.test.json")
# #train + #test = 8683
assert len(train_data) == 6_898
assert sum(len(ex['chunks']) for ex in train_data) == 18_065
assert sum(len(ex['relations']) for ex in train_data) == 3_292
assert len(test_data) == 1_785
assert sum(len(ex['chunks']) for ex in test_data) == 4_670
assert sum(len(ex['relations']) for ex in test_data) == 795
def test_ace2005_rel(self):
io = JsonIO(relation_key='relations', relation_type_key='type', relation_head_key='head', relation_tail_key='tail')
train_data = io.read("data/ace-luan2019naacl/ace05/train.json")
dev_data = io.read("data/ace-luan2019naacl/ace05/dev.json")
test_data = io.read("data/ace-luan2019naacl/ace05/test.json")
assert len(train_data) == 10_051
assert sum(len(ex['chunks']) for ex in train_data) == 26_473
assert sum(len(ex['relations']) for ex in train_data) == 4_788
assert len(dev_data) == 2_424
assert sum(len(ex['chunks']) for ex in dev_data) == 6_338
assert sum(len(ex['relations']) for ex in dev_data) == 1_131
assert len(test_data) == 2_050
assert sum(len(ex['chunks']) for ex in test_data) == 5_476
assert sum(len(ex['relations']) for ex in test_data) == 1_151
def test_conll2004(self):
json_io = JsonIO(text_key='tokens',
chunk_key='entities',
chunk_type_key='type',
chunk_start_key='start',
chunk_end_key='end',
relation_key='relations',
relation_type_key='type',
relation_head_key='head',
relation_tail_key='tail')
train_data = json_io.read("data/conll2004/conll04_train.json")
dev_data = json_io.read("data/conll2004/conll04_dev.json")
test_data = json_io.read("data/conll2004/conll04_test.json")
assert len(train_data) == 922
assert sum(len(ex['chunks']) for ex in train_data) == 3_377
assert sum(len(ex['relations']) for ex in train_data) == 1_283
assert len(dev_data) == 231
assert sum(len(ex['chunks']) for ex in dev_data) == 893
assert sum(len(ex['relations']) for ex in dev_data) == 343
assert len(test_data) == 288
assert sum(len(ex['chunks']) for ex in test_data) == 1_079
assert sum(len(ex['relations']) for ex in test_data) == 422
assert not any(detect_nested(ex['chunks']) for data in [train_data, dev_data, test_data] for ex in data)
assert all(filter_clashed_by_priority(ex['chunks'], allow_nested=False) == ex['chunks'] for data in [train_data, dev_data, test_data] for ex in data)
def test_SciERC(self):
json_io = JsonIO(text_key='tokens',
chunk_key='entities',
chunk_type_key='type',
chunk_start_key='start',
chunk_end_key='end',
relation_key='relations',
relation_type_key='type',
relation_head_key='head',
relation_tail_key='tail')
train_data = json_io.read("data/SciERC/scierc_train.json")
dev_data = json_io.read("data/SciERC/scierc_dev.json")
test_data = json_io.read("data/SciERC/scierc_test.json")
assert len(train_data) == 1_861
assert sum(len(ex['chunks']) for ex in train_data) == 5_598
assert sum(len(ex['relations']) for ex in train_data) == 3_215 # 4 duplicated relations dropped here
assert len(dev_data) == 275
assert sum(len(ex['chunks']) for ex in dev_data) == 811
assert sum(len(ex['relations']) for ex in dev_data) == 455
assert len(test_data) == 551
assert sum(len(ex['chunks']) for ex in test_data) == 1_685
assert sum(len(ex['relations']) for ex in test_data) == 974
assert any(detect_nested(ex['chunks']) for data in [train_data, dev_data, test_data] for ex in data)
assert all(filter_clashed_by_priority(ex['chunks'], allow_nested=True) == ex['chunks'] for data in [train_data, dev_data, test_data] for ex in data)
def test_ADE(self):
json_io = JsonIO(text_key='tokens',
chunk_key='entities',
chunk_type_key='type',
chunk_start_key='start',
chunk_end_key='end',
relation_key='relations',
relation_type_key='type',
relation_head_key='head',
relation_tail_key='tail')
data = json_io.read("data/ADE/ade_full.json")
assert len(data) == 4_272
assert sum(len(ex['chunks']) for ex in data) == 10_839
assert sum(len(ex['relations']) for ex in data) == 6_821
assert any(detect_nested(ex['chunks']) for ex in data)
assert all(filter_clashed_by_priority(ex['chunks'], allow_nested=True) == ex['chunks'] for ex in data)
def test_yidu_s4k(self):
io = JsonIO(is_tokenized=False,
tokenize_callback='char',
text_key='originalText',
chunk_key='entities',
chunk_type_key='label_type',
chunk_start_key='start_pos',
chunk_end_key='end_pos',
is_whole_piece=False,
encoding='utf-8-sig')
train_data1, train_errors1, train_mismatches1 = io.read("data/yidu_s4k/subtask1_training_part1.txt", return_errors=True)
train_data2, train_errors2, train_mismatches2 = io.read("data/yidu_s4k/subtask1_training_part2.txt", return_errors=True)
train_data, train_errors, train_mismatches = (train_data1 + train_data2,
train_errors1 + train_errors2,
train_mismatches1 + train_mismatches2)
test_data, test_errors, test_mismatches = io.read("data/yidu_s4k/subtask1_test_set_with_answer.json", return_errors=True)
assert len(train_data) == 1_000
assert sum(len(ex['chunks']) for ex in train_data) == 17_653
assert len(train_errors) == 0
assert len(train_mismatches) == 0
assert len(test_data) == 379
assert sum(len(ex['chunks']) for ex in test_data) == 6_002
assert len(test_errors) == 0
assert len(test_mismatches) == 0
assert not any(detect_nested(ex['chunks']) for data in [train_data, test_data] for ex in data)
assert all(filter_clashed_by_priority(ex['chunks'], allow_nested=False) == ex['chunks'] for data in [train_data, test_data] for ex in data)
def test_cmeee(self):
io = JsonIO(is_tokenized=False, tokenize_callback='char', text_key='text',
chunk_key='entities', chunk_type_key='type', chunk_start_key='start_idx', chunk_end_key='end_idx',
encoding='utf-8')
train_data, train_errors, train_mismatches = io.read("data/cblue/CMeEE/CMeEE_train_vz.json", return_errors=True)
dev_data, dev_errors, dev_mismatches = io.read("data/cblue/CMeEE/CMeEE_dev_vz.json", return_errors=True)
test_data = io.read("data/cblue/CMeEE/CMeEE_test_vz.json")
assert len(train_data) == 15_000
assert sum(len(ex['chunks']) for ex in train_data) == 61_796
assert len(train_errors) == 0
assert len(train_mismatches) == 0
assert len(dev_data) == 5_000
assert sum(len(ex['chunks']) for ex in dev_data) == 20_300
assert len(dev_errors) == 0
assert len(dev_mismatches) == 0
assert len(test_data) == 3_000
assert any(detect_nested(ex['chunks']) for data in [train_data, dev_data] for ex in data)
# TODO: clashed chunks?
# assert all(filter_clashed_by_priority(ex['chunks'], allow_nested=True) == ex['chunks'] for data in [train_data, dev_data] for ex in data)
def test_cmeie(self):
io = JsonIO(is_tokenized=False, tokenize_callback='char', text_key='text',
chunk_key='entities', chunk_type_key='type', chunk_start_key='start', chunk_end_key='end',
relation_key='relations', relation_type_key='type', relation_head_key='head', relation_tail_key='tail',
encoding='utf-8')
train_data = io.read("data/cblue/CMeIE/CMeIE_train_vz.json")
dev_data = io.read("data/cblue/CMeIE/CMeIE_dev_vz.json")
test_data = io.read("data/cblue/CMeIE/CMeIE_test_vz.json")
assert len(train_data) == 14_339
assert sum(len(ex['chunks']) for ex in train_data) == 57_880
assert sum(len(ex['relations']) for ex in train_data) == 43_629
assert len(dev_data) == 3_585
assert sum(len(ex['chunks']) for ex in dev_data) == 14_167
assert sum(len(ex['relations']) for ex in dev_data) == 10_613
assert len(test_data) == 4_482
@pytest.mark.parametrize("is_whole_piece", [False, True])
def test_read_write_consistency(is_whole_piece):
brat_io = BratIO(tokenize_callback='char',
has_ins_space=True, ins_space_tokenize_callback=jieba.cut, max_len=200,
parse_attrs=True, parse_relations=True, encoding='utf-8')
json_io = JsonIO(is_tokenized=True,
attribute_key='attributes', attribute_type_key='type', attribute_chunk_key='entity',
relation_key='relations', relation_type_key='type', relation_head_key='head', relation_tail_key='tail',
is_whole_piece=is_whole_piece, encoding='utf-8')
src_fn = "data/HwaMei/demo.txt"
mark = "wp" if is_whole_piece else "nonwp"
trg_fn = f"data/HwaMei/demo-write-{mark}.json"
data = brat_io.read(src_fn)
json_io.write(data, trg_fn)
data_retr = json_io.read(trg_fn)
assert data_retr == data
class TestSQuADIO(object):
def test_squad_v2(self, spacy_nlp_en):
io = SQuADIO(tokenize_callback=spacy_nlp_en, verbose=False)
train_data, train_errors, train_mismatches = io.read("data/SQuAD/train-v2.0.json", return_errors=True)
dev_data, dev_errors, dev_mismatches = io.read("data/SQuAD/dev-v2.0.json", return_errors=True)
assert len(train_data) == 130_319
assert len(train_errors) == 0
assert len(train_mismatches) == 1_009
assert len(dev_data) == 11873
assert len(dev_errors) == 0
assert len(dev_mismatches) == 208
class TestKarpathyIO(object):
"""
References
----------
[1] Karpathy, et al. 2015. Deep visual-semantic alignments for generating image descriptions. CVPR 2015.
[2] Vinyals, et al. 2015. Show and tell: A neural image caption generator. CVPR 2015.
"""
def test_flickr8k(self):
io = KarpathyIO(img_folder="data/flickr8k/Flicker8k_Dataset")
train_data, dev_data, test_data = io.read("data/flickr8k/flickr8k-karpathy2015cvpr.json")
assert len(train_data) == 6_000
assert len(dev_data) == 1_000
assert len(test_data) == 1_000
def test_flickr30k(self):
io = KarpathyIO(img_folder="data/flickr30k/flickr30k-images")
train_data, dev_data, test_data = io.read("data/flickr30k/flickr30k-karpathy2015cvpr.json")
assert len(train_data) == 29_000
assert len(dev_data) == 1_014
assert len(test_data) == 1_000
def test_mscoco(self):
io = KarpathyIO(img_folder="data/mscoco/data2014")
train_data, dev_data, test_data = io.read("data/mscoco/mscoco-karpathy2015cvpr.json")
assert len(train_data) == 113_287
assert len(dev_data) == 5_000
assert len(test_data) == 5_000
class TestTextClsIO(object):
"""
References
----------
[1] Zhang et al. 2021. CBLUE: A Chinese Biomedical Language Understanding Evaluation Benchmark.
[2] https://github.com/CBLUEbenchmark/CBLUE
[3] https://tianchi.aliyun.com/dataset/dataDetail?dataId=113223
"""
def test_chip_ctc(self):
io = TextClsIO(is_tokenized=False, tokenize_callback=jieba.tokenize, text_key='text', mapping={" ": ""}, encoding='utf-8')
train_data = io.read("data/cblue/CHIP-CTC/CHIP-CTC_train.json")
dev_data = io.read("data/cblue/CHIP-CTC/CHIP-CTC_dev.json")
test_data = io.read("data/cblue/CHIP-CTC/CHIP-CTC_test.json")
assert len(train_data) == 22_962
assert len(dev_data) == 7_682
assert len(test_data) == 10_192 # 10_000?
def test_chip_sts(self):
io = TextClsIO(is_tokenized=False, tokenize_callback=jieba.tokenize, text_key='text1', paired_text_key='text2', mapping={" ": ""}, encoding='utf-8')
train_data = io.read("data/cblue/CHIP-STS/CHIP-STS_train.json")
dev_data = io.read("data/cblue/CHIP-STS/CHIP-STS_dev.json")
test_data = io.read("data/cblue/CHIP-STS/CHIP-STS_test.json")
assert len(train_data) == 16_000
assert len(dev_data) == 4_000
assert len(test_data) == 10_000
def test_kuake_qic(self):
io = TextClsIO(is_tokenized=False, tokenize_callback=jieba.tokenize, text_key='query', mapping={" ": ""}, encoding='utf-8')
train_data = io.read("data/cblue/KUAKE-QIC/KUAKE-QIC_train.json")
dev_data = io.read("data/cblue/KUAKE-QIC/KUAKE-QIC_dev.json")
test_data = io.read("data/cblue/KUAKE-QIC/KUAKE-QIC_test.json")
assert len(train_data) == 6_931
assert len(dev_data) == 1_955
assert len(test_data) == 1_994 # 1_944?
def test_kuake_qtr(self):
io = TextClsIO(is_tokenized=False, tokenize_callback=jieba.tokenize, text_key='query', paired_text_key='title', mapping={" ": ""}, encoding='utf-8')
train_data = io.read("data/cblue/KUAKE-QTR/KUAKE-QTR_train.json")
dev_data = io.read("data/cblue/KUAKE-QTR/KUAKE-QTR_dev.json")
test_data = io.read("data/cblue/KUAKE-QTR/KUAKE-QTR_test.json")
assert len(train_data) == 24_174
assert len(dev_data) == 2_913
assert len(test_data) == 5_465
def test_kuake_qqr(self):
io = TextClsIO(is_tokenized=False, tokenize_callback=jieba.tokenize, text_key='query1', paired_text_key='query2', mapping={" ": ""}, encoding='utf-8')
train_data = io.read("data/cblue/KUAKE-QQR/KUAKE-QQR_train.json")
dev_data = io.read("data/cblue/KUAKE-QQR/KUAKE-QQR_dev.json")
test_data = io.read("data/cblue/KUAKE-QQR/KUAKE-QQR_test.json")
assert len(train_data) == 15_000
assert len(dev_data) == 1_599 # 1_600
assert len(test_data) == 1_596
| 49.661333 | 158 | 0.615529 |
7946ba39a60ccaa81eb5ebc968f04ba161171ac8 | 658 | py | Python | mayan/apps/autoadmin/apps.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 343 | 2015-01-05T14:19:35.000Z | 2018-12-10T19:07:48.000Z | mayan/apps/autoadmin/apps.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 191 | 2015-01-03T00:48:19.000Z | 2018-11-30T09:10:25.000Z | mayan/apps/autoadmin/apps.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 257 | 2019-05-14T10:26:37.000Z | 2022-03-30T03:37:36.000Z | from django.conf import settings
from django.db.models.signals import post_save
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.apps import MayanAppConfig
from .handlers import handler_auto_admin_account_password_change
class AutoAdminAppConfig(MayanAppConfig):
has_tests = True
name = 'mayan.apps.autoadmin'
verbose_name = _('Auto administrator')
def ready(self):
super().ready()
post_save.connect(
dispatch_uid='autoadmin_handler_account_password_change',
receiver=handler_auto_admin_account_password_change,
sender=settings.AUTH_USER_MODEL
)
| 28.608696 | 69 | 0.746201 |
7946bc15718dd60639f3ac922f82102f9a8cc485 | 213 | py | Python | play.py | AcrobaticPanicc/stocks-manager | f8411c13ce049583bd4fa5b18a9266919f222edd | [
"MIT"
] | 93 | 2020-07-09T23:15:44.000Z | 2022-03-30T22:14:22.000Z | play.py | AcrobaticPanicc/stocks-manager | f8411c13ce049583bd4fa5b18a9266919f222edd | [
"MIT"
] | 1 | 2021-12-07T03:17:05.000Z | 2021-12-07T03:17:05.000Z | play.py | AcrobaticPanicc/stocks-manager | f8411c13ce049583bd4fa5b18a9266919f222edd | [
"MIT"
] | 16 | 2020-07-10T03:44:07.000Z | 2022-03-30T22:14:27.000Z | # from app.table import table
#
#
# from app import data
# import csv
#
# a = []
#
# with open('tickers.csv') as csvfile:
# res = csv.reader(csvfile)
# for i in res:
# a.append(i[1])
#
# print(a)
# | 14.2 | 38 | 0.56338 |
7946bd5a4094f4ce5d3536e6d7beb73dab9df4a9 | 221 | py | Python | publications/__init__.py | fusion-flap/flap_nstx | 234bc6d482357cde94b36c85819e9a3f292819d5 | [
"MIT"
] | null | null | null | publications/__init__.py | fusion-flap/flap_nstx | 234bc6d482357cde94b36c85819e9a3f292819d5 | [
"MIT"
] | 1 | 2019-10-03T22:25:58.000Z | 2021-10-06T10:31:11.000Z | publications/__init__.py | fusion-flap/flap_nstx | 234bc6d482357cde94b36c85819e9a3f292819d5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 1 16:05:39 2021
@author: mlampert
"""
from .read_ahmed_matlab_file import read_ahmed_fit_parameters, read_ahmed_edge_current, read_ahmed_matlab_file | 24.555556 | 110 | 0.760181 |
7946bd88ba0b577fb9f4885b80829cec6f26c919 | 11,059 | py | Python | tensorflow/python/estimator/export/export_test.py | xincao79/tensorflow | 7fa0cf39f854d5fdaaa19ad6425dfed02f5fea64 | [
"Apache-2.0"
] | 5 | 2019-01-17T08:47:31.000Z | 2020-05-06T06:10:56.000Z | tensorflow/python/estimator/export/export_test.py | xincao79/tensorflow | 7fa0cf39f854d5fdaaa19ad6425dfed02f5fea64 | [
"Apache-2.0"
] | 3 | 2020-03-24T18:15:52.000Z | 2021-02-02T22:28:38.000Z | tensorflow/python/estimator/export/export_test.py | xincao79/tensorflow | 7fa0cf39f854d5fdaaa19ad6425dfed02f5fea64 | [
"Apache-2.0"
] | 3 | 2017-06-09T10:39:33.000Z | 2021-04-08T16:13:30.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for export."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import time
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.export import export_output
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
class ExportTest(test_util.TensorFlowTestCase):
def test_serving_input_receiver_constructor(self):
"""Tests that no errors are raised when input is expected."""
features = {
"feature0": constant_op.constant([0]),
u"feature1": constant_op.constant([1]),
"feature2": sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
}
receiver_tensors = {
"example0": array_ops.placeholder(dtypes.string, name="example0"),
u"example1": array_ops.placeholder(dtypes.string, name="example1"),
}
export.ServingInputReceiver(features, receiver_tensors)
def test_serving_input_receiver_features_invalid(self):
receiver_tensors = {
"example0": array_ops.placeholder(dtypes.string, name="example0"),
u"example1": array_ops.placeholder(dtypes.string, name="example1"),
}
with self.assertRaisesRegexp(ValueError, "features must be defined"):
export.ServingInputReceiver(
features=None,
receiver_tensors=receiver_tensors)
with self.assertRaisesRegexp(ValueError, "feature keys must be strings"):
export.ServingInputReceiver(
features={1: constant_op.constant([1])},
receiver_tensors=receiver_tensors)
with self.assertRaisesRegexp(
ValueError, "feature feature1 must be a Tensor or SparseTensor"):
export.ServingInputReceiver(
features={"feature1": [1]},
receiver_tensors=receiver_tensors)
def test_serving_input_receiver_receiver_tensors_invalid(self):
features = {
"feature0": constant_op.constant([0]),
u"feature1": constant_op.constant([1]),
"feature2": sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
}
with self.assertRaisesRegexp(
ValueError, "receiver_tensors must be defined"):
export.ServingInputReceiver(
features=features,
receiver_tensors=None)
with self.assertRaisesRegexp(
ValueError, "receiver_tensors keys must be strings"):
export.ServingInputReceiver(
features=features,
receiver_tensors={
1: array_ops.placeholder(dtypes.string, name="example0")})
with self.assertRaisesRegexp(
ValueError, "receiver_tensor example1 must be a Tensor"):
export.ServingInputReceiver(
features=features,
receiver_tensors={"example1": [1]})
def test_single_feature_single_receiver(self):
feature = constant_op.constant(5)
receiver_tensor = array_ops.placeholder(dtypes.string)
input_receiver = export.ServingInputReceiver(
feature, receiver_tensor)
# single feature is automatically named
feature_key, = input_receiver.features.keys()
self.assertEqual("feature", feature_key)
# single receiver is automatically named
receiver_key, = input_receiver.receiver_tensors.keys()
self.assertEqual("input", receiver_key)
def test_multi_feature_single_receiver(self):
features = {"foo": constant_op.constant(5),
"bar": constant_op.constant(6)}
receiver_tensor = array_ops.placeholder(dtypes.string)
_ = export.ServingInputReceiver(features, receiver_tensor)
def test_multi_feature_multi_receiver(self):
features = {"foo": constant_op.constant(5),
"bar": constant_op.constant(6)}
receiver_tensors = {"baz": array_ops.placeholder(dtypes.int64),
"qux": array_ops.placeholder(dtypes.float32)}
_ = export.ServingInputReceiver(features, receiver_tensors)
def test_feature_wrong_type(self):
feature = "not a tensor"
receiver_tensor = array_ops.placeholder(dtypes.string)
with self.assertRaises(ValueError):
_ = export.ServingInputReceiver(feature, receiver_tensor)
def test_receiver_wrong_type(self):
feature = constant_op.constant(5)
receiver_tensor = "not a tensor"
with self.assertRaises(ValueError):
_ = export.ServingInputReceiver(feature, receiver_tensor)
def test_build_parsing_serving_input_receiver_fn(self):
feature_spec = {"int_feature": parsing_ops.VarLenFeature(dtypes.int64),
"float_feature": parsing_ops.VarLenFeature(dtypes.float32)}
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
with ops.Graph().as_default():
serving_input_receiver = serving_input_receiver_fn()
self.assertEqual(set(["int_feature", "float_feature"]),
set(serving_input_receiver.features.keys()))
self.assertEqual(set(["examples"]),
set(serving_input_receiver.receiver_tensors.keys()))
example = example_pb2.Example()
text_format.Parse("features: { "
" feature: { "
" key: 'int_feature' "
" value: { "
" int64_list: { "
" value: [ 21, 2, 5 ] "
" } "
" } "
" } "
" feature: { "
" key: 'float_feature' "
" value: { "
" float_list: { "
" value: [ 525.25 ] "
" } "
" } "
" } "
"} ", example)
with self.test_session() as sess:
sparse_result = sess.run(
serving_input_receiver.features,
feed_dict={
serving_input_receiver.receiver_tensors["examples"].name:
[example.SerializeToString()]})
self.assertAllEqual([[0, 0], [0, 1], [0, 2]],
sparse_result["int_feature"].indices)
self.assertAllEqual([21, 2, 5],
sparse_result["int_feature"].values)
self.assertAllEqual([[0, 0]],
sparse_result["float_feature"].indices)
self.assertAllEqual([525.25],
sparse_result["float_feature"].values)
def test_build_raw_serving_input_receiver_fn(self):
features = {"feature_1": constant_op.constant(["hello"]),
"feature_2": constant_op.constant([42])}
serving_input_receiver_fn = export.build_raw_serving_input_receiver_fn(
features)
with ops.Graph().as_default():
serving_input_receiver = serving_input_receiver_fn()
self.assertEqual(set(["feature_1", "feature_2"]),
set(serving_input_receiver.features.keys()))
self.assertEqual(set(["feature_1", "feature_2"]),
set(serving_input_receiver.receiver_tensors.keys()))
self.assertEqual(
dtypes.string,
serving_input_receiver.receiver_tensors["feature_1"].dtype)
self.assertEqual(
dtypes.int32,
serving_input_receiver.receiver_tensors["feature_2"].dtype)
def test_build_all_signature_defs_explicit_default(self):
receiver_tensor = constant_op.constant(["11"])
output_1 = constant_op.constant([1.])
output_2 = constant_op.constant(["2"])
output_3 = constant_op.constant(["3"])
export_outputs = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.RegressionOutput(value=output_1),
"head-2": export_output.ClassificationOutput(classes=output_2),
"head-3": export_output.PredictOutput(outputs={
"some_output_3": output_3
}),
}
signature_defs = export.build_all_signature_defs(
receiver_tensor, export_outputs)
expected_signature_defs = {
"serving_default":
signature_def_utils.regression_signature_def(receiver_tensor,
output_1),
"head-2":
signature_def_utils.classification_signature_def(receiver_tensor,
output_2, None),
"head-3":
signature_def_utils.predict_signature_def({
"input": receiver_tensor
}, {"output": output_3})
}
self.assertDictEqual(expected_signature_defs, signature_defs)
def test_build_all_signature_defs_export_outputs_required(self):
receiver_tensor = constant_op.constant(["11"])
with self.assertRaises(ValueError) as e:
export.build_all_signature_defs(receiver_tensor, None)
self.assertEqual("export_outputs must be a dict.", str(e.exception))
def test_get_timestamped_export_dir(self):
export_dir_base = tempfile.mkdtemp() + "export/"
export_dir_1 = export.get_timestamped_export_dir(
export_dir_base)
time.sleep(2)
export_dir_2 = export.get_timestamped_export_dir(
export_dir_base)
time.sleep(2)
export_dir_3 = export.get_timestamped_export_dir(
export_dir_base)
# Export directories should be named using a timestamp that is seconds
# since epoch. Such a timestamp is 10 digits long.
time_1 = os.path.basename(export_dir_1)
self.assertEqual(10, len(time_1))
time_2 = os.path.basename(export_dir_2)
self.assertEqual(10, len(time_2))
time_3 = os.path.basename(export_dir_3)
self.assertEqual(10, len(time_3))
self.assertTrue(int(time_1) < int(time_2))
self.assertTrue(int(time_2) < int(time_3))
if __name__ == "__main__":
test.main()
| 40.214545 | 80 | 0.65449 |
7946c02a7c60824df40e69916d67d2193847d83a | 8,085 | py | Python | src/cryptography/hazmat/primitives/asymmetric/dsa.py | zapfbandit/pyca-cryptography | 577e058798e0e6ae04eac07f958f1072e351e859 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2015-10-08T21:28:42.000Z | 2020-08-15T10:03:49.000Z | src/cryptography/hazmat/primitives/asymmetric/dsa.py | zapfbandit/pyca-cryptography | 577e058798e0e6ae04eac07f958f1072e351e859 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 68 | 2015-06-27T00:28:53.000Z | 2022-03-31T10:09:13.000Z | src/cryptography/hazmat/primitives/asymmetric/dsa.py | zapfbandit/pyca-cryptography | 577e058798e0e6ae04eac07f958f1072e351e859 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2016-04-11T20:22:38.000Z | 2018-09-20T20:39:54.000Z | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import abc
import typing
from cryptography.hazmat.backends import _get_backend
from cryptography.hazmat.primitives import _serialization, hashes
from cryptography.hazmat.primitives.asymmetric import (
AsymmetricSignatureContext,
AsymmetricVerificationContext,
utils as asym_utils,
)
class DSAParameters(metaclass=abc.ABCMeta):
@abc.abstractmethod
def generate_private_key(self) -> "DSAPrivateKey":
"""
Generates and returns a DSAPrivateKey.
"""
@abc.abstractmethod
def parameter_numbers(self) -> "DSAParameterNumbers":
"""
Returns a DSAParameterNumbers.
"""
DSAParametersWithNumbers = DSAParameters
class DSAPrivateKey(metaclass=abc.ABCMeta):
@abc.abstractproperty
def key_size(self) -> int:
"""
The bit length of the prime modulus.
"""
@abc.abstractmethod
def public_key(self) -> "DSAPublicKey":
"""
The DSAPublicKey associated with this private key.
"""
@abc.abstractmethod
def parameters(self) -> DSAParameters:
"""
The DSAParameters object associated with this private key.
"""
@abc.abstractmethod
def signer(
self,
signature_algorithm: hashes.HashAlgorithm,
) -> AsymmetricSignatureContext:
"""
Returns an AsymmetricSignatureContext used for signing data.
"""
@abc.abstractmethod
def sign(
self,
data: bytes,
algorithm: typing.Union[asym_utils.Prehashed, hashes.HashAlgorithm],
) -> bytes:
"""
Signs the data
"""
@abc.abstractmethod
def private_numbers(self) -> "DSAPrivateNumbers":
"""
Returns a DSAPrivateNumbers.
"""
@abc.abstractmethod
def private_bytes(
self,
encoding: _serialization.Encoding,
format: _serialization.PrivateFormat,
encryption_algorithm: _serialization.KeySerializationEncryption,
) -> bytes:
"""
Returns the key serialized as bytes.
"""
DSAPrivateKeyWithSerialization = DSAPrivateKey
class DSAPublicKey(metaclass=abc.ABCMeta):
@abc.abstractproperty
def key_size(self) -> int:
"""
The bit length of the prime modulus.
"""
@abc.abstractmethod
def parameters(self) -> DSAParameters:
"""
The DSAParameters object associated with this public key.
"""
@abc.abstractmethod
def verifier(
self,
signature: bytes,
signature_algorithm: hashes.HashAlgorithm,
) -> AsymmetricVerificationContext:
"""
        Returns an AsymmetricVerificationContext used for verifying data.
"""
@abc.abstractmethod
def public_numbers(self) -> "DSAPublicNumbers":
"""
Returns a DSAPublicNumbers.
"""
@abc.abstractmethod
def public_bytes(
self,
encoding: _serialization.Encoding,
format: _serialization.PublicFormat,
) -> bytes:
"""
Returns the key serialized as bytes.
"""
@abc.abstractmethod
def verify(
self,
signature: bytes,
data: bytes,
algorithm: typing.Union[asym_utils.Prehashed, hashes.HashAlgorithm],
):
"""
Verifies the signature of the data.
"""
DSAPublicKeyWithSerialization = DSAPublicKey
class DSAParameterNumbers(object):
def __init__(self, p: int, q: int, g: int):
if (
not isinstance(p, int)
or not isinstance(q, int)
or not isinstance(g, int)
):
raise TypeError(
"DSAParameterNumbers p, q, and g arguments must be integers."
)
self._p = p
self._q = q
self._g = g
p = property(lambda self: self._p)
q = property(lambda self: self._q)
g = property(lambda self: self._g)
def parameters(self, backend=None) -> DSAParameters:
backend = _get_backend(backend)
return backend.load_dsa_parameter_numbers(self)
def __eq__(self, other):
if not isinstance(other, DSAParameterNumbers):
return NotImplemented
return self.p == other.p and self.q == other.q and self.g == other.g
def __ne__(self, other):
return not self == other
def __repr__(self):
return (
"<DSAParameterNumbers(p={self.p}, q={self.q}, "
"g={self.g})>".format(self=self)
)
class DSAPublicNumbers(object):
def __init__(self, y: int, parameter_numbers: DSAParameterNumbers):
if not isinstance(y, int):
raise TypeError("DSAPublicNumbers y argument must be an integer.")
if not isinstance(parameter_numbers, DSAParameterNumbers):
raise TypeError(
"parameter_numbers must be a DSAParameterNumbers instance."
)
self._y = y
self._parameter_numbers = parameter_numbers
y = property(lambda self: self._y)
parameter_numbers = property(lambda self: self._parameter_numbers)
def public_key(self, backend=None) -> DSAPublicKey:
backend = _get_backend(backend)
return backend.load_dsa_public_numbers(self)
def __eq__(self, other):
if not isinstance(other, DSAPublicNumbers):
return NotImplemented
return (
self.y == other.y
and self.parameter_numbers == other.parameter_numbers
)
def __ne__(self, other):
return not self == other
def __repr__(self):
return (
"<DSAPublicNumbers(y={self.y}, "
"parameter_numbers={self.parameter_numbers})>".format(self=self)
)
class DSAPrivateNumbers(object):
def __init__(self, x: int, public_numbers: DSAPublicNumbers):
if not isinstance(x, int):
raise TypeError("DSAPrivateNumbers x argument must be an integer.")
if not isinstance(public_numbers, DSAPublicNumbers):
raise TypeError(
"public_numbers must be a DSAPublicNumbers instance."
)
self._public_numbers = public_numbers
self._x = x
x = property(lambda self: self._x)
public_numbers = property(lambda self: self._public_numbers)
def private_key(self, backend=None) -> DSAPrivateKey:
backend = _get_backend(backend)
return backend.load_dsa_private_numbers(self)
def __eq__(self, other):
if not isinstance(other, DSAPrivateNumbers):
return NotImplemented
return (
self.x == other.x and self.public_numbers == other.public_numbers
)
def __ne__(self, other):
return not self == other
def generate_parameters(key_size: int, backend=None) -> DSAParameters:
backend = _get_backend(backend)
return backend.generate_dsa_parameters(key_size)
def generate_private_key(key_size: int, backend=None) -> DSAPrivateKey:
backend = _get_backend(backend)
return backend.generate_dsa_private_key_and_parameters(key_size)
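# Illustrative sketch (not part of the original module): a typical round trip
# with the helpers above, assuming the default backend is available; the
# message bytes and hash choice are placeholders.
#
#     from cryptography.hazmat.primitives import hashes
#
#     private_key = generate_private_key(key_size=2048)
#     signature = private_key.sign(b"message", hashes.SHA256())
#     private_key.public_key().verify(signature, b"message", hashes.SHA256())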
def _check_dsa_parameters(parameters: DSAParameterNumbers):
if parameters.p.bit_length() not in [1024, 2048, 3072, 4096]:
raise ValueError(
"p must be exactly 1024, 2048, 3072, or 4096 bits long"
)
if parameters.q.bit_length() not in [160, 224, 256]:
raise ValueError("q must be exactly 160, 224, or 256 bits long")
if not (1 < parameters.g < parameters.p):
raise ValueError("g, p don't satisfy 1 < g < p.")
def _check_dsa_private_numbers(numbers: DSAPrivateNumbers):
parameters = numbers.public_numbers.parameter_numbers
_check_dsa_parameters(parameters)
if numbers.x <= 0 or numbers.x >= parameters.q:
raise ValueError("x must be > 0 and < q.")
if numbers.public_numbers.y != pow(parameters.g, numbers.x, parameters.p):
raise ValueError("y must be equal to (g ** x % p).")
| 28.170732 | 79 | 0.635993 |
7946c10e6b8f4d297de3f705c48872fc8a6a5c72 | 5,908 | py | Python | tensorflow_probability/python/internal/test_combinations.py | bolcom/probability | 4a11efad1ecd8a1336e4c9fdb0105efbf2375ad7 | [
"Apache-2.0"
] | 1 | 2019-10-10T06:15:42.000Z | 2019-10-10T06:15:42.000Z | tensorflow_probability/python/internal/test_combinations.py | bolcom/probability | 4a11efad1ecd8a1336e4c9fdb0105efbf2375ad7 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/internal/test_combinations.py | bolcom/probability | 4a11efad1ecd8a1336e4c9fdb0105efbf2375ad7 | [
"Apache-2.0"
] | 1 | 2019-10-10T06:15:44.000Z | 2019-10-10T06:15:44.000Z | # Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Decorators for testing TFP code under combinations of TF features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import tensorflow.compat.v2 as tf
from tensorflow.python.eager import def_function # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import combinations # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import test_combinations # pylint: disable=g-direct-tensorflow-import
__all__ = [
'test_all_tf_execution_regimes',
'test_graph_and_eager_modes',
]
@contextlib.contextmanager
def _tf_function_mode_context(tf_function_mode):
"""Context manager controlling `tf.function` behavior (enabled/disabled).
Before activating, the previously set mode is stored. Then the mode is changed
to the given `tf_function_mode` and control yielded back to the caller. Upon
exiting the context, the mode is returned to its original state.
Args:
tf_function_mode: a Python `str`, either 'disabled' or 'enabled'. If
'enabled', `@tf.function`-decorated code behaves as usual (ie, a background
graph is created). If 'disabled', `@tf.function`-decorated code will behave
as if it had not been `@tf.function`-decorated. Since users will be able to
do this (e.g., to debug library code that has been
`@tf.function`-decorated), we need to ensure our tests cover the behavior
when this is the case.
Yields:
None
"""
if tf_function_mode not in ['enabled', 'disabled']:
raise ValueError(
'Only allowable values for tf_function_mode_context are `enabled` and '
'`disabled`; but got `{}`'.format(tf_function_mode))
original_mode = def_function.RUN_FUNCTIONS_EAGERLY
try:
tf.config.experimental_run_functions_eagerly(tf_function_mode == 'disabled')
yield
finally:
tf.config.experimental_run_functions_eagerly(original_mode)
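# Illustrative sketch (not part of the original module): within the context,
# `@tf.function`-decorated callables run eagerly when the mode is 'disabled';
# `some_decorated_fn` is a hypothetical callable.
#
#     with _tf_function_mode_context('disabled'):
#         result = some_decorated_fn()  # executes eagerly, no background graph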
class ExecuteFunctionsEagerlyCombination(test_combinations.TestCombination):
  """A `TestCombination` for enabling/disabling `tf.function` execution modes.
For more on `TestCombination`, check out
'tensorflow/python/framework/test_combinations.py' in the TensorFlow code
base.
This `TestCombination` supports two values for the `tf_function`
combination argument: 'disabled' and 'enabled'. The mode switching is
performed using `tf.experimental_run_functions_eagerly(mode)`.
"""
def context_managers(self, kwargs):
mode = kwargs.pop('tf_function', 'enabled')
return [_tf_function_mode_context(mode)]
def parameter_modifiers(self):
return [test_combinations.OptionalParameter('tf_function')]
def test_all_tf_execution_regimes(test_class_or_method=None):
"""Decorator for generating a collection of tests in various contexts.
Must be applied to subclasses of `parameterized.TestCase` (from
`absl/testing`), or a method of such a subclass.
When applied to a test method, this decorator results in the replacement of
that method with a collection of new test methods, each executed under a
different set of context managers that control some aspect of the execution
model. This decorator generates three test scenario combinations:
1. Eager mode with `tf.function` decorations enabled
2. Eager mode with `tf.function` decorations disabled
    3. Graph mode (everything)
When applied to a test class, all the methods in the class are affected.
Args:
test_class_or_method: the `TestCase` class or method to decorate.
Returns:
decorator: A generated TF `test_combinations` decorator, or if
`test_class_or_method` is not `None`, the generated decorator applied to
that function.
"""
decorator = test_combinations.generate(
(test_combinations.combine(mode='graph',
tf_function='enabled') +
test_combinations.combine(mode='eager',
tf_function=['enabled', 'disabled'])),
test_combinations=[
combinations.EagerGraphCombination(),
ExecuteFunctionsEagerlyCombination(),
])
if test_class_or_method:
return decorator(test_class_or_method)
return decorator
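# Illustrative sketch (not part of the original module): the decorator is
# applied to a `parameterized.TestCase` subclass; the class and method names
# here are hypothetical.
#
#     @test_all_tf_execution_regimes
#     class MyDistributionTest(parameterized.TestCase):
#
#       def test_log_prob(self):
#         ...  # runs once per graph/eager/tf.function combination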
def test_graph_and_eager_modes(test_class_or_method=None):
"""Decorator for generating graph and eager mode tests from a single test.
Must be applied to subclasses of `parameterized.TestCase` (from
absl/testing), or a method of such a subclass.
When applied to a test method, this decorator results in the replacement of
  that method with two new test methods, one executed in graph mode and the
other in eager mode.
When applied to a test class, all the methods in the class are affected.
Args:
test_class_or_method: the `TestCase` class or method to decorate.
Returns:
decorator: A generated TF `test_combinations` decorator, or if
`test_class_or_method` is not `None`, the generated decorator applied to
that function.
"""
decorator = test_combinations.generate(
test_combinations.combine(mode=['graph', 'eager']),
test_combinations=[combinations.EagerGraphCombination()])
if test_class_or_method:
return decorator(test_class_or_method)
return decorator
| 38.614379 | 103 | 0.741198 |
7946c18acc1c390c927bfd6ced0b3ab42693e747 | 4,859 | py | Python | mct_json_converter/main_window.py | iorodeo/mct_json_converter | aa7cbb180928868fb8b12e177a793373f6a15191 | [
"Apache-2.0"
] | 1 | 2020-07-23T19:04:03.000Z | 2020-07-23T19:04:03.000Z | mct_json_converter/main_window.py | iorodeo/mct_json_converter | aa7cbb180928868fb8b12e177a793373f6a15191 | [
"Apache-2.0"
] | null | null | null | mct_json_converter/main_window.py | iorodeo/mct_json_converter | aa7cbb180928868fb8b12e177a793373f6a15191 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import sys
import os
import time
import mct_json_converter
from PyQt4 import QtCore
from PyQt4 import QtGui
from converter_ui import Ui_MainWindow
USER_HOME = os.getenv('USERPROFILE')
if USER_HOME is None:
USER_HOME = os.getenv('HOME')
DEFAULT_DIRECTORY = os.path.join(USER_HOME,'mct_log')
else:
DEFAULT_DIRECTORY = USER_HOME
class ConverterMainWindow(QtGui.QMainWindow,Ui_MainWindow):
def __init__(self,parent=None):
super(ConverterMainWindow,self).__init__(parent)
self.setupUi(self)
self.connectActions()
self.initialize()
def initialize(self):
self.matRadioButton.setChecked(True)
self.fileListWidget.setAlternatingRowColors(True)
if os.path.isdir(DEFAULT_DIRECTORY):
self.directory = DEFAULT_DIRECTORY
else:
self.directory = USER_HOME
self.disableWidgetsOnEmpty()
if not mct_json_converter.haveh5py:
self.hdf5RadioButton.setEnabled(False)
def connectActions(self):
self.selectPushButton.clicked.connect(self.selectClicked)
self.clearPushButton.clicked.connect(self.clearClicked)
self.convertPushButton.clicked.connect(self.convertClicked)
def selectClicked(self):
if not os.path.isdir(self.directory):
self.directory = USER_HOME
fileNameList = QtGui.QFileDialog.getOpenFileNames(
self,
'Select JSON files for conversion',
self.directory,
"JSON (*.json)"
)
self.fileListWidget.clear()
if fileNameList:
for name in fileNameList:
listItem = QtGui.QListWidgetItem(name)
self.fileListWidget.addItem(listItem)
self.enableWidgetsOnNonEmpty()
else:
self.disableWidgetsOnEmpty()
def enableWidgetsOnNonEmpty(self):
self.convertPushButton.setEnabled(True)
self.clearPushButton.setEnabled(True)
def disableWidgetsOnEmpty(self):
self.convertPushButton.setEnabled(False)
self.clearPushButton.setEnabled(False)
def enableWidgetsAfterConverting(self):
self.selectPushButton.setEnabled(True)
self.clearPushButton.setEnabled(True)
self.matRadioButton.setEnabled(True)
if mct_json_converter.haveh5py:
self.hdf5RadioButton.setEnabled(True)
self.fileListWidget.setEnabled(True)
self.convertPushButton.setEnabled(True)
def disableWidgetsWhileConverting(self):
self.selectPushButton.setEnabled(False)
self.clearPushButton.setEnabled(False)
self.matRadioButton.setEnabled(False)
self.hdf5RadioButton.setEnabled(False)
self.fileListWidget.setEnabled(False)
self.convertPushButton.setEnabled(False)
def clearClicked(self):
self.fileListWidget.clear()
def convertClicked(self):
self.disableWidgetsWhileConverting()
fileFormat = self.getFileFormat()
numFiles = self.fileListWidget.count()
for i in range(numFiles):
item = self.fileListWidget.item(i)
fileName = str(item.text())
filePath = os.path.join(self.directory,fileName)
statusMessage = ' Converting: {0}/{1}'.format(i+1,numFiles)
self.statusbar.showMessage(statusMessage)
self.repaint()
try:
converter = mct_json_converter.JSON_Converter(filePath)
except Exception, e:
message = 'Unable to convert file: {0}\n\n{1}'.format(fileName,str(e))
QtGui.QMessageBox.critical(self,'Error',message)
self.enableWidgetsAfterConverting()
return
if fileFormat == 'mat':
writeFunc = converter.writeMatFile
elif fileFormat == 'hdf5':
writeFunc = converter.writeH5File
else:
raise RuntimeError, 'unknown file format'
try:
writeFunc()
except Exception, e:
message = 'Unable to convert file: {0}\n\n{1}'.format(fileName,str(e))
QtGui.QMessageBox.critical(self,'Error',message)
self.enableWidgetsAfterConverting()
return
self.statusbar.showMessage(' Done')
self.enableWidgetsAfterConverting()
def getFileFormat(self):
if self.hdf5RadioButton.isChecked():
fileFormat = 'hdf5'
else:
fileFormat = 'mat'
return fileFormat
def converterMain():
app = QtGui.QApplication(sys.argv)
mainWindow = ConverterMainWindow()
mainWindow.show()
app.exec_()
# -----------------------------------------------------------------------------
if __name__ == '__main__':
converterMain()
| 34.460993 | 86 | 0.630582 |
7946c1a480bac043986cf39ce6ac793ecad9800c | 828 | py | Python | Rosalind/mrna.py | yuriyshapovalov/Prototypes | 1fc4af4434440a8f59a4bcb486e79fd53d199a7d | [
"Apache-2.0"
] | null | null | null | Rosalind/mrna.py | yuriyshapovalov/Prototypes | 1fc4af4434440a8f59a4bcb486e79fd53d199a7d | [
"Apache-2.0"
] | 1 | 2015-03-25T22:35:52.000Z | 2015-03-25T22:35:52.000Z | Rosalind/mrna.py | yuriyshapovalov/Prototypes | 1fc4af4434440a8f59a4bcb486e79fd53d199a7d | [
"Apache-2.0"
] | null | null | null | # Inferring mRNA from Protein
# rosalind.info/problems/mrna/
import sys
class mrna:
codon_freq = {'F':2, 'L':6, 'S':6, 'Y':2, 'A':4, 'C':2,
'W':1, 'P':4, 'H':2, 'Q':2, 'R':6, 'I':3,
'M':1, 'N':2, 'K':2, 'D':2, 'V':4, 'E':2,
'G':4, 'T':4, 'stop':3}
def main(self, dna_seq):
if not dna_seq:
raise Exception('ERROR: File is empty.')
res = self.codon_freq['stop']
for i in range(len(dna_seq)):
res *= self.codon_freq[dna_seq[i]]
return res % 1000000
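    # Illustrative check (not part of the original solution): for the sample
    # protein string "MA", the result is codon_freq['stop'] * codon_freq['M']
    # * codon_freq['A'] = 3 * 1 * 4 = 12, the expected Rosalind answer.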
if __name__ == '__main__':
filename = sys.argv[1]
if not filename:
raise Exception('ERROR: File name should not be empty!')
with open(filename, 'r') as seq_file:
        result = mrna().main(seq_file.read().strip())  # strip trailing newline before lookup
print(result) | 29.571429 | 64 | 0.504831 |
7946c29d7f5341c8ab9780f4022f686031d18139 | 459 | py | Python | pitchdeck/wsgi.py | rjmackay/pitchdeck | 51e5715e95441169af783135cb34c48d6d380f00 | [
"MIT"
] | null | null | null | pitchdeck/wsgi.py | rjmackay/pitchdeck | 51e5715e95441169af783135cb34c48d6d380f00 | [
"MIT"
] | null | null | null | pitchdeck/wsgi.py | rjmackay/pitchdeck | 51e5715e95441169af783135cb34c48d6d380f00 | [
"MIT"
] | null | null | null | """
WSGI config for pitchdeck project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pitchdeck.settings")
os.environ.setdefault("DJANGO_CONFIGURATION", "Dev")
from configurations.wsgi import get_wsgi_application # noqa
application = get_wsgi_application()
| 25.5 | 78 | 0.786492 |
7946c2bf55aefa9f8b086ae978865e9b69e24c76 | 4,533 | py | Python | dataset_coco.py | XDong18/dla | 063f1facc2bc49b2ca3a6bb9c87977b6f194505e | [
"BSD-3-Clause"
] | null | null | null | dataset_coco.py | XDong18/dla | 063f1facc2bc49b2ca3a6bb9c87977b6f194505e | [
"BSD-3-Clause"
] | null | null | null | dataset_coco.py | XDong18/dla | 063f1facc2bc49b2ca3a6bb9c87977b6f194505e | [
"BSD-3-Clause"
] | null | null | null | from pycocotools.coco import COCO
import os
import os.path as osp
import sys
import torch
import torch.utils.data as data
import cv2
import numpy as np
from PIL import Image
import data_transforms as transforms
def f_print(*obj):
fn='./output.log'
# print(obj)
with open(fn, 'a+') as f:
print(obj, file=f)
COCO_CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
                'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush')
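# COCO_LABEL_MAP re-maps the original, non-contiguous COCO category ids
# (1-90 with gaps) onto contiguous labels 1-80; 0 is left for the background
# in the segmentation masks built below.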
COCO_LABEL_MAP = { 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8,
9: 9, 10: 10, 11: 11, 13: 12, 14: 13, 15: 14, 16: 15, 17: 16,
18: 17, 19: 18, 20: 19, 21: 20, 22: 21, 23: 22, 24: 23, 25: 24,
27: 25, 28: 26, 31: 27, 32: 28, 33: 29, 34: 30, 35: 31, 36: 32,
37: 33, 38: 34, 39: 35, 40: 36, 41: 37, 42: 38, 43: 39, 44: 40,
46: 41, 47: 42, 48: 43, 49: 44, 50: 45, 51: 46, 52: 47, 53: 48,
54: 49, 55: 50, 56: 51, 57: 52, 58: 53, 59: 54, 60: 55, 61: 56,
62: 57, 63: 58, 64: 59, 65: 60, 67: 61, 70: 62, 72: 63, 73: 64,
74: 65, 75: 66, 76: 67, 77: 68, 78: 69, 79: 70, 80: 71, 81: 72,
82: 73, 84: 74, 85: 75, 86: 76, 87: 77, 88: 78, 89: 79, 90: 80}
class COCOSeg(data.Dataset):
def __init__(self, image_path, info_file, transforms=None, is_train=False):
self.root = image_path
self.coco = COCO(info_file)
        self.ids = list(self.coco.imgToAnns.keys()) # maybe some images don't have labels
self.transforms = transforms
self.is_train = is_train
def __len__(self):
return len(self.ids)
def __getitem__(self, index):
img, mask = self.pull_item(index)
if self.is_train:
mask_PIL = Image.fromarray(mask)
# img = img.resize((1920, 1216))
data = [img]
data.append(mask_PIL)
data = list(self.transforms(*data))
trans_temp = transforms.ToTensor()
tensor_img = trans_temp(data[0])[0]
array_mask = np.array(data[1])
return(tuple([tensor_img, torch.from_numpy(array_mask)]))
else:
# img = img.resize((1920, 1216))
img = np.array(img)
img = np.transpose(img, (2,0,1))
if img.max()>1:
img = img / 255.
return(tuple([torch.from_numpy(img), torch.from_numpy(mask)]))
def pull_item(self, index):
'''
Return Image, array
'''
img_id = self.ids[index]
ann_ids = self.coco.getAnnIds(imgIds=img_id)
target = self.coco.loadAnns(ann_ids)
crowd = [x for x in target if ('iscrowd' in x and x['iscrowd'])]
target = [x for x in target if not ('iscrowd' in x and x['iscrowd'])]
target = crowd + target
file_name = self.coco.loadImgs(img_id)[0]['file_name']
path = osp.join(self.root, file_name)
img = Image.open(path)
if len(img.split())==1:
img_array = np.array(img)
img_array = cv2.merge((img_array, img_array, img_array))
img = Image.fromarray(img_array)
width, height = img.size
mask = np.zeros((height, width))
for obj in target:
cat_id = COCO_LABEL_MAP[obj['category_id']]
obj_mask = self.coco.annToMask(obj)
mask[np.where(obj_mask==1)] = cat_id
# mask = np.expand_dims(mask, axis=0)
# f_print(mask.min(), mask.max())
return img, mask | 41.209091 | 90 | 0.521288 |
7946c2e55b4c68dfc1dfc0b540957a9f648d2f66 | 3,971 | py | Python | WebsiteVisitUserPatternAnalyze.py | syeddabeer/0projects | e132628f3693ed40c5ea9055a6c79f8266196bae | [
"Apache-2.0"
] | null | null | null | WebsiteVisitUserPatternAnalyze.py | syeddabeer/0projects | e132628f3693ed40c5ea9055a6c79f8266196bae | [
"Apache-2.0"
] | null | null | null | WebsiteVisitUserPatternAnalyze.py | syeddabeer/0projects | e132628f3693ed40c5ea9055a6c79f8266196bae | [
"Apache-2.0"
] | null | null | null | from typing import List
from collections import defaultdict, Counter
from itertools import combinations
class Solution:
def mostVisitedPattern(self, username: List[str], timestamp: List[int], website: List[str]) -> List[str]:
users = defaultdict(list)
for user, time, site in sorted(zip(username, timestamp, website), key = lambda x: (x[0],x[1])):
users[user].append(site)
patterns = Counter()
for user, sites in users.items():
patterns.update(Counter(set(combinations(sites, 3))))
return max(sorted(patterns), key=patterns.get)
class Solution2:
def mostVisitedPattern(self, username, timestamp, website):
# Create tuples as shown in description
# The timestamps may not always be pre-ordered (one of the testcases)
# Sort first based on user, then time (grouping by user)
# This also helps to maintain order of websites visited in the later part of the solution
users = defaultdict(list)
# It is not necessary to use defaultdict here, we can manually create dictionaries too
for user, time, site in sorted(zip(username, timestamp, website), key = lambda x: (x[0],x[1])):
users[user].append(site) # defaultdicts simplify and optimize code
patterns = Counter() # this can also be replaced with a manually created dictionary of counts
# Get unique 3-sequence (note that website order will automatically be maintained)
# Note that we take the set of each 3-sequence for each user as they may have repeats
# For each 3-sequence, count number of users
for user, sites in users.items():
patterns.update(Counter(set(combinations(sites, 3))))
# Re-iterating above step for clarity
# 1. first get all possible 3-sequences combinations(sites, 3)
# 2. then, count each one once (set)
# 3. finally, count the number of times we've seen the 3-sequence for every user (patterns.update(Counter))
# - updating a dictionary will update the value for existing keys accordingly (int in this case)
# An expanded version of the above step is given below.
# print(patterns) # sanity check
# get most frequent 3-sequence sorted lexicographically
return max(sorted(patterns), key=patterns.get)
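# Illustrative example (not in the original file), using the commonly cited
# sample for this problem:
#     username  = ["joe","joe","joe","james","james","james","james","mary","mary","mary"]
#     timestamp = [1,2,3,4,5,6,7,8,9,10]
#     website   = ["home","about","career","home","cart","maps","home","home","about","career"]
# Expected result: ["home","about","career"], the 3-sequence visited in that
# order by both joe and mary (score 2).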
"""
You are given two string arrays username and website and an integer array timestamp. All the given arrays are of the same length and the tuple [username[i], website[i], timestamp[i]] indicates that the user username[i] visited the website website[i] at time timestamp[i].
A pattern is a list of three websites (not necessarily distinct).
For example, ["home", "away", "love"], ["leetcode", "love", "leetcode"], and ["luffy", "luffy", "luffy"] are all patterns.
The score of a pattern is the number of users that visited all the websites in the pattern in the same order they appeared in the pattern.
For example, if the pattern is ["home", "away", "love"], the score is the number of users x such that x visited "home" then visited "away" and visited "love" after that.
Similarly, if the pattern is ["leetcode", "love", "leetcode"], the score is the number of users x such that x visited "leetcode" then visited "love" and visited "leetcode" one more time after that.
Also, if the pattern is ["luffy", "luffy", "luffy"], the score is the number of users x such that x visited "luffy" three different times at different timestamps.
Return the pattern with the largest score. If there is more than one pattern with the same largest score, return the lexicographically smallest such pattern.
""" | 49.6375 | 271 | 0.700076 |
7946c378ee5afcc22d03400f6f322100d8077abe | 14,009 | py | Python | tests/suite/test_rl_policies_vsr.py | datamachines/kubernetes-ingress | d4629491677daeb3f247774009cff0beaffe0ec6 | [
"Apache-2.0"
] | 1 | 2021-06-06T23:51:18.000Z | 2021-06-06T23:51:18.000Z | tests/suite/test_rl_policies_vsr.py | datamachines/kubernetes-ingress | d4629491677daeb3f247774009cff0beaffe0ec6 | [
"Apache-2.0"
] | null | null | null | tests/suite/test_rl_policies_vsr.py | datamachines/kubernetes-ingress | d4629491677daeb3f247774009cff0beaffe0ec6 | [
"Apache-2.0"
] | null | null | null | import pytest, requests, time
from kubernetes.client.rest import ApiException
from suite.resources_utils import wait_before_test, replace_configmap_from_yaml
from suite.custom_resources_utils import (
read_crd,
delete_virtual_server,
create_virtual_server_from_yaml,
patch_virtual_server_from_yaml,
patch_v_s_route_from_yaml,
create_policy_from_yaml,
delete_policy,
read_policy,
)
from settings import TEST_DATA, DEPLOYMENTS
std_vs_src = f"{TEST_DATA}/virtual-server-route/standard/virtual-server.yaml"
rl_pol_pri_src = f"{TEST_DATA}/rate-limit/policies/rate-limit-primary.yaml"
rl_vsr_pri_src = f"{TEST_DATA}/rate-limit/route-subroute/virtual-server-route-pri-subroute.yaml"
rl_pol_sec_src = f"{TEST_DATA}/rate-limit/policies/rate-limit-secondary.yaml"
rl_vsr_sec_src = f"{TEST_DATA}/rate-limit/route-subroute/virtual-server-route-sec-subroute.yaml"
rl_pol_invalid_src = f"{TEST_DATA}/rate-limit/policies/rate-limit-invalid.yaml"
rl_vsr_invalid_src = (
f"{TEST_DATA}/rate-limit/route-subroute/virtual-server-route-invalid-subroute.yaml"
)
rl_vsr_override_src = (
f"{TEST_DATA}/rate-limit/route-subroute/virtual-server-route-override-subroute.yaml"
)
rl_vsr_override_vs_spec_src = (
f"{TEST_DATA}/rate-limit/route-subroute/virtual-server-vsr-spec-override.yaml"
)
rl_vsr_override_vs_route_src = (
f"{TEST_DATA}/rate-limit/route-subroute/virtual-server-vsr-route-override.yaml"
)
@pytest.mark.policies
@pytest.mark.parametrize(
"crd_ingress_controller, v_s_route_setup",
[
(
{
"type": "complete",
"extra_args": [
f"-enable-custom-resources",
f"-enable-preview-policies",
f"-enable-leader-election=false",
],
},
{"example": "virtual-server-route"},
)
],
indirect=True,
)
class TestRateLimitingPoliciesVsr:
def restore_default_vsr(self, kube_apis, v_s_route_setup) -> None:
"""
Function to revert vsr deployments to valid state
"""
patch_src_m = f"{TEST_DATA}/virtual-server-route/route-multiple.yaml"
patch_v_s_route_from_yaml(
kube_apis.custom_objects,
v_s_route_setup.route_m.name,
patch_src_m,
v_s_route_setup.route_m.namespace,
)
wait_before_test()
@pytest.mark.smoke
@pytest.mark.parametrize("src", [rl_vsr_pri_src])
def test_rl_policy_1rs_vsr(
self,
kube_apis,
crd_ingress_controller,
v_s_route_app_setup,
v_s_route_setup,
test_namespace,
src,
):
"""
Test if rate-limiting policy is working with ~1 rps in vsr:subroute
"""
req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}"
print(f"Create rl policy")
pol_name = create_policy_from_yaml(
kube_apis.custom_objects, rl_pol_pri_src, v_s_route_setup.route_m.namespace
)
print(f"Patch vsr with policy: {src}")
patch_v_s_route_from_yaml(
kube_apis.custom_objects,
v_s_route_setup.route_m.name,
src,
v_s_route_setup.route_m.namespace,
)
wait_before_test()
policy_info = read_crd(
kube_apis.custom_objects, v_s_route_setup.route_m.namespace, "policies", pol_name
)
occur = []
t_end = time.perf_counter() + 1
resp = requests.get(
f"{req_url}{v_s_route_setup.route_m.paths[0]}",
headers={"host": v_s_route_setup.vs_host},
)
print(resp.status_code)
assert resp.status_code == 200
while time.perf_counter() < t_end:
resp = requests.get(
f"{req_url}{v_s_route_setup.route_m.paths[0]}",
headers={"host": v_s_route_setup.vs_host},
)
occur.append(resp.status_code)
delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace)
self.restore_default_vsr(kube_apis, v_s_route_setup)
assert (
policy_info["status"]
and policy_info["status"]["reason"] == "AddedOrUpdated"
and policy_info["status"]["state"] == "Valid"
)
assert occur.count(200) <= 1
@pytest.mark.parametrize("src", [rl_vsr_sec_src])
def test_rl_policy_10rs_vsr(
self,
kube_apis,
crd_ingress_controller,
v_s_route_app_setup,
v_s_route_setup,
test_namespace,
src,
):
"""
Test if rate-limiting policy is working with ~10 rps in vsr:subroute
"""
rate_sec = 10
req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}"
print(f"Create rl policy")
pol_name = create_policy_from_yaml(
kube_apis.custom_objects, rl_pol_sec_src, v_s_route_setup.route_m.namespace
)
print(f"Patch vsr with policy: {src}")
patch_v_s_route_from_yaml(
kube_apis.custom_objects,
v_s_route_setup.route_m.name,
src,
v_s_route_setup.route_m.namespace,
)
wait_before_test()
policy_info = read_crd(
kube_apis.custom_objects, v_s_route_setup.route_m.namespace, "policies", pol_name
)
occur = []
t_end = time.perf_counter() + 1
resp = requests.get(
f"{req_url}{v_s_route_setup.route_m.paths[0]}",
headers={"host": v_s_route_setup.vs_host},
)
print(resp.status_code)
assert resp.status_code == 200
while time.perf_counter() < t_end:
resp = requests.get(
f"{req_url}{v_s_route_setup.route_m.paths[0]}",
headers={"host": v_s_route_setup.vs_host},
)
occur.append(resp.status_code)
delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace)
self.restore_default_vsr(kube_apis, v_s_route_setup)
assert (
policy_info["status"]
and policy_info["status"]["reason"] == "AddedOrUpdated"
and policy_info["status"]["state"] == "Valid"
)
assert rate_sec >= occur.count(200) >= (rate_sec - 2)
@pytest.mark.parametrize("src", [rl_vsr_override_src])
def test_rl_policy_override_vsr(
self,
kube_apis,
crd_ingress_controller,
v_s_route_app_setup,
v_s_route_setup,
test_namespace,
src,
):
"""
Test if rate-limiting policy with lower rps is used when multiple policies are listed in vsr:subroute
And test if the order of policies in vsr:subroute has no effect
"""
req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}"
print(f"Create rl policy: 1rps")
pol_name_pri = create_policy_from_yaml(
kube_apis.custom_objects, rl_pol_pri_src, v_s_route_setup.route_m.namespace
)
print(f"Create rl policy: 10rps")
pol_name_sec = create_policy_from_yaml(
kube_apis.custom_objects, rl_pol_sec_src, v_s_route_setup.route_m.namespace
)
print(f"Patch vsr with policy: {src}")
patch_v_s_route_from_yaml(
kube_apis.custom_objects,
v_s_route_setup.route_m.name,
src,
v_s_route_setup.route_m.namespace,
)
wait_before_test()
occur = []
t_end = time.perf_counter() + 1
resp = requests.get(
f"{req_url}{v_s_route_setup.route_m.paths[0]}",
headers={"host": v_s_route_setup.vs_host},
)
print(resp.status_code)
assert resp.status_code == 200
while time.perf_counter() < t_end:
resp = requests.get(
f"{req_url}{v_s_route_setup.route_m.paths[0]}",
headers={"host": v_s_route_setup.vs_host},
)
occur.append(resp.status_code)
delete_policy(kube_apis.custom_objects, pol_name_pri, v_s_route_setup.route_m.namespace)
delete_policy(kube_apis.custom_objects, pol_name_sec, v_s_route_setup.route_m.namespace)
self.restore_default_vsr(kube_apis, v_s_route_setup)
assert occur.count(200) <= 1
@pytest.mark.parametrize("src", [rl_vsr_pri_src])
def test_rl_policy_deleted_vsr(
self,
kube_apis,
crd_ingress_controller,
v_s_route_app_setup,
v_s_route_setup,
test_namespace,
src,
):
"""
Test if deleting a policy results in 500
"""
req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}"
print(f"Create rl policy")
pol_name = create_policy_from_yaml(
kube_apis.custom_objects, rl_pol_pri_src, v_s_route_setup.route_m.namespace
)
print(f"Patch vsr with policy: {src}")
patch_v_s_route_from_yaml(
kube_apis.custom_objects,
v_s_route_setup.route_m.name,
src,
v_s_route_setup.route_m.namespace,
)
wait_before_test()
resp = requests.get(
f"{req_url}{v_s_route_setup.route_m.paths[0]}",
headers={"host": v_s_route_setup.vs_host},
)
assert resp.status_code == 200
print(resp.status_code)
delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace)
resp = requests.get(
f"{req_url}{v_s_route_setup.route_m.paths[0]}",
headers={"host": v_s_route_setup.vs_host},
)
self.restore_default_vsr(kube_apis, v_s_route_setup)
assert resp.status_code == 500
@pytest.mark.parametrize("src", [rl_vsr_invalid_src])
def test_rl_policy_invalid_vsr(
self,
kube_apis,
crd_ingress_controller,
v_s_route_app_setup,
v_s_route_setup,
test_namespace,
src,
):
"""
Test if using an invalid policy in vsr:subroute results in 500
"""
req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}"
print(f"Create rl policy")
invalid_pol_name = create_policy_from_yaml(
kube_apis.custom_objects, rl_pol_invalid_src, v_s_route_setup.route_m.namespace
)
print(f"Patch vsr with policy: {src}")
patch_v_s_route_from_yaml(
kube_apis.custom_objects,
v_s_route_setup.route_m.name,
src,
v_s_route_setup.route_m.namespace,
)
wait_before_test()
policy_info = read_crd(
kube_apis.custom_objects,
v_s_route_setup.route_m.namespace,
"policies",
invalid_pol_name,
)
resp = requests.get(
f"{req_url}{v_s_route_setup.route_m.paths[0]}",
headers={"host": v_s_route_setup.vs_host},
)
print(resp.status_code)
delete_policy(kube_apis.custom_objects, invalid_pol_name, v_s_route_setup.route_m.namespace)
self.restore_default_vsr(kube_apis, v_s_route_setup)
assert (
policy_info["status"]
and policy_info["status"]["reason"] == "Rejected"
and policy_info["status"]["state"] == "Invalid"
)
assert resp.status_code == 500
@pytest.mark.parametrize("src", [rl_vsr_override_vs_spec_src, rl_vsr_override_vs_route_src])
def test_override_vs_vsr(
self,
kube_apis,
crd_ingress_controller,
v_s_route_app_setup,
test_namespace,
v_s_route_setup,
src,
):
"""
Test if vsr subroute policy overrides vs spec policy
And vsr subroute policy overrides vs route policy
"""
rate_sec = 10
req_url = f"http://{v_s_route_setup.public_endpoint.public_ip}:{v_s_route_setup.public_endpoint.port}"
# policy for virtualserver
print(f"Create rl policy: 1rps")
pol_name_vs = create_policy_from_yaml(
kube_apis.custom_objects, rl_pol_pri_src, v_s_route_setup.route_m.namespace
)
# policy for virtualserverroute
print(f"Create rl policy: 10rps")
pol_name_vsr = create_policy_from_yaml(
kube_apis.custom_objects, rl_pol_sec_src, v_s_route_setup.route_m.namespace
)
# patch vsr with 10rps policy
patch_v_s_route_from_yaml(
kube_apis.custom_objects,
v_s_route_setup.route_m.name,
rl_vsr_sec_src,
v_s_route_setup.route_m.namespace,
)
# patch vs with 1rps policy
patch_virtual_server_from_yaml(
kube_apis.custom_objects, v_s_route_setup.vs_name, src, v_s_route_setup.namespace
)
wait_before_test()
occur = []
t_end = time.perf_counter() + 1
resp = requests.get(
f"{req_url}{v_s_route_setup.route_m.paths[0]}",
headers={"host": v_s_route_setup.vs_host},
)
print(resp.status_code)
assert resp.status_code == 200
while time.perf_counter() < t_end:
resp = requests.get(
f"{req_url}{v_s_route_setup.route_m.paths[0]}",
headers={"host": v_s_route_setup.vs_host},
)
occur.append(resp.status_code)
delete_policy(kube_apis.custom_objects, pol_name_vs, v_s_route_setup.route_m.namespace)
delete_policy(kube_apis.custom_objects, pol_name_vsr, v_s_route_setup.route_m.namespace)
self.restore_default_vsr(kube_apis, v_s_route_setup)
patch_virtual_server_from_yaml(
kube_apis.custom_objects, v_s_route_setup.vs_name, std_vs_src, v_s_route_setup.namespace
)
assert rate_sec >= occur.count(200) >= (rate_sec - 2)
| 37.159151 | 110 | 0.633022 |
7946c42bac956164a24ced7f38c27636f661dc2f | 10,804 | py | Python | ogs5py/tools/script.py | MuellerSeb/ogs5py | 752e7bd2298fbd476406d168f6b7d1a85863dccd | [
"MIT"
] | 3 | 2018-05-27T15:39:07.000Z | 2018-10-29T17:02:11.000Z | ogs5py/tools/script.py | MuellerSeb/ogs5py | 752e7bd2298fbd476406d168f6b7d1a85863dccd | [
"MIT"
] | 1 | 2018-11-12T11:32:12.000Z | 2018-11-12T13:07:48.000Z | ogs5py/tools/script.py | MuellerSeb/ogs5py | 752e7bd2298fbd476406d168f6b7d1a85863dccd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Script generator for ogs5py.
.. currentmodule:: ogs5py.tools.script
Generator
^^^^^^^^^
.. autosummary::
:toctree: generated
gen_script
Helpers
^^^^^^^
.. autosummary::
:toctree: generated
formater
get_line
tab
add_block_file
add_load_file
add_list_file
----
"""
import os
import shutil
from ogs5py.fileclasses.base import BlockFile
from ogs5py.tools.types import MULTI_FILES, OGS_EXT, STRTYPE
def formater(val):
"""
Format values as string.
Parameters
----------
val : value
input value to be formatted
"""
if isinstance(val, STRTYPE):
# add quotes to strings
return "'" + val + "'"
return str(val)
def get_line(cont_line):
"""
Create content line for the script.
Parameters
----------
cont_line : list of values
content line from a BlockFile
"""
return "[" + ", ".join(map(formater, cont_line)) + "]"
def tab(num):
"""
Get tab indentation.
Parameters
----------
num : int
indentation depth
"""
return num * 4 * " "
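# Illustrative sketch (not part of the original module), assuming STRTYPE
# covers plain Python strings:
#
#     formater("GROUNDWATER_FLOW")    ->  "'GROUNDWATER_FLOW'"
#     formater(1.5)                   ->  '1.5'
#     get_line([2, "CONSTANT", 1.5])  ->  "[2, 'CONSTANT', 1.5]"
#     tab(2)                          ->  '        '  (8 spaces)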
def add_block_file(block_file, script, ogs_cls_name="model"):
"""
Add block-file creation to script.
Parameters
----------
block_file : BlockFile
BlockFile class to be added to the script
script : stream
given opened file for the script
ogs_cls_name : str
name of the model within the script
"""
block_no = block_file.get_block_no()
file_type = block_file.get_file_type().lower()
for i in range(block_no):
mkey, skeys, cont = block_file.get_block(index=i, as_dict=False)
if block_file.is_block_unique(i):
print(ogs_cls_name + "." + file_type + ".add_block(", file=script)
if "" not in skeys:
print(tab(1) + "main_key=" + formater(mkey) + ",", file=script)
for j, skey in enumerate(skeys):
if skey == "":
skey = mkey
line_no = len(cont[j])
# empty value
if (
line_no == 0
or (line_no == 1 and not cont[j][0])
or (
line_no == 1
and len(cont[j][0]) == 1
and cont[j][0][0] == ""
)
):
print(tab(1) + skey + "=[],", file=script)
# single value
elif line_no == 1 and len(cont[j][0]) == 1:
print(
tab(1) + skey + "=" + formater(cont[j][0][0]) + ",",
file=script,
)
# single line
elif line_no == 1:
print(
tab(1) + skey + "=" + get_line(cont[j][0]) + ",",
file=script,
)
# multiple lines
else:
print(tab(1) + skey + "=[", file=script)
for cont_k in cont[j]:
print(tab(2) + get_line(cont_k) + ",", file=script)
print(tab(1) + "],", file=script)
print(")", file=script)
else:
print(ogs_cls_name + "." + file_type + ".add_block(", file=script)
print(tab(1) + "main_key=" + formater(mkey) + ",", file=script)
line_no = len(cont[0])
skey = skeys[0]
# empty first value
if (
line_no == 0
or (line_no == 1 and not cont[0][0])
or (
line_no == 1
and len(cont[0][0]) == 1
and cont[0][0][0] == ""
)
):
print(tab(1) + skey + "=[],", file=script)
# single first value
elif line_no == 1 and len(cont[0][0]) == 1:
print(
tab(1) + skey + "=" + formater(cont[0][0][0]) + ",",
file=script,
)
# single first line
elif line_no == 1:
print(
tab(1) + skey + "=" + get_line(cont[0][0]) + ",",
file=script,
)
# multiple first lines
else:
print(tab(1) + skey + "=[", file=script)
for cont_k in cont[0]:
print(tab(2) + get_line(cont_k) + ",", file=script)
print(tab(1) + "],", file=script)
print(")", file=script)
# additional lines
for j, skey in enumerate(skeys[1:]):
j += 1 # get the right content
print(
ogs_cls_name + "." + file_type + ".append_to_block(",
file=script,
)
line_no = len(cont[j])
# empty value
if (
line_no == 0
or (line_no == 1 and not cont[j][0])
or (
line_no == 1
and len(cont[j][0]) == 1
and cont[j][0][0] == ""
)
):
print(tab(1) + skey + "=[],", file=script)
# single value
elif line_no == 1 and len(cont[j][0]) == 1:
print(
tab(1) + skey + "=" + formater(cont[j][0][0]) + ",",
file=script,
)
# single line
elif line_no == 1:
print(
tab(1) + skey + "=" + get_line(cont[j][0]) + ",",
file=script,
)
# multiple lines
else:
print(tab(1) + skey + "=[", file=script)
for cont_k in cont[j]:
print(tab(2) + get_line(cont_k) + ",", file=script)
print(tab(1) + "],", file=script)
print(")", file=script)
def add_load_file(load_file, script, ogs_cls_name="model"):
"""
Add a file to be loaded from a script.
Parameters
----------
load_file : OGSFile
file that should be saved and then loaded from the script
script : stream
given opened file for the script
ogs_cls_name : str
name of the model within the script
"""
if load_file.is_empty:
return
load_file.write_file()
name = load_file.file_name
file_type = load_file.get_file_type().lower()
print(
ogs_cls_name + "." + file_type + ".read_file(" + formater(name) + ")",
file=script,
)
def add_list_file(list_file, script, typ, ogs_cls_name="model"):
"""
Add a listed file to be loaded from a script.
Parameters
----------
list_file : File
listed file that should be saved and then loaded from the script
script : stream
given opened file for the script
typ : str
typ of the list file
ogs_cls_name : str
name of the model within the script
"""
list_file.write_file()
name = list_file.name
file_ext = list_file.file_ext
file_name = name + file_ext
print(ogs_cls_name + "." + typ + ".add(", file=script)
print(tab(1) + "name=" + formater(name) + ",", file=script)
print(tab(1) + "file_ext=" + formater(file_ext) + ",", file=script)
print(")", file=script)
print(
ogs_cls_name + "." + typ + ".read_file(" + formater(file_name) + ")",
file=script,
)
def gen_script(
ogs_class,
script_dir=os.path.join(os.getcwd(), "ogs_script"),
script_name="model.py",
ogs_cls_name="model",
task_root=None,
task_id=None,
output_dir=None,
separate_files=None,
):
"""
Generate a python script for the given model.
Parameters
----------
ogs_class : OGS
model class to be converted to a script
script_dir : str
target directory for the script
script_name : str
name for the script file (including .py ending)
ogs_cls_name : str
name of the model in the script
task_root : str
used task_root in the script
task_id : str
used task_id in the script
output_dir : str
used output_dir in the script
separate_files : list of str or None
list of files, that should be written to separate files and
then loaded from the script
Notes
-----
This will only create BlockFiles from the script. GLI and MSH files
as well as every listed or line-wise file will be stored separately.
"""
if separate_files is None:
separate_files = []
if task_root is None:
task_root = ogs_class.task_root
if task_id is None:
task_id = ogs_class.task_id
if not os.path.exists(script_dir):
os.makedirs(script_dir)
path = os.path.join(script_dir, script_name)
# temporarily overwrite the task_root
original_root = ogs_class.task_root
ogs_class.task_root = script_dir
# set the imported classes
load = ["OGS"]
load = ", ".join(load)
# open the script file
with open(path, "w") as script:
print("# -*- coding: utf-8 -*-", file=script)
print("from ogs5py import " + load, file=script)
print("", file=script)
print(ogs_cls_name + " = OGS(", file=script)
print(tab(1) + "task_root=" + formater(task_root) + ",", file=script)
print(tab(1) + "task_id=" + formater(task_id) + ",", file=script)
if output_dir is not None:
print(
tab(1) + "output_dir=" + formater(output_dir) + ",",
file=script,
)
print(")", file=script)
for ext in OGS_EXT:
ogs_file = getattr(ogs_class, ext[1:])
if (
not isinstance(ogs_file, BlockFile)
or ext[1:] in separate_files
):
add_load_file(ogs_file, script, ogs_cls_name)
else:
add_block_file(ogs_file, script, ogs_cls_name)
add_load_file(ogs_class.pqcdat, script, ogs_cls_name)
for typ in MULTI_FILES:
for file in getattr(ogs_class, typ):
add_list_file(file, script, typ, ogs_cls_name)
for copy_file in ogs_class.copy_files:
base = os.path.basename(copy_file)
shutil.copyfile(copy_file, os.path.join(script_dir, base))
print(ogs_cls_name + ".add_copy_file(" + base + ")", file=script)
print(ogs_cls_name + ".write_input()", file=script)
print(ogs_cls_name + ".run_model()", file=script)
ogs_class.task_root = original_root
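# A minimal usage sketch (hypothetical directory and file names; it assumes an
# already configured OGS model instance):
#
#   from ogs5py import OGS
#   model = OGS(task_root="model_root", task_id="model")
#   ...  # add mesh, geometry and BlockFile content to the model
#   gen_script(model, script_dir="generated_script", script_name="model.py")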
| 30.780627 | 79 | 0.494076 |
7946c46960ef15fdcaff6b5ad9f0bc2623a84b17 | 5,077 | py | Python | tensorflow/python/keras/_impl/keras/datasets/imdb.py | 2392863668/tensorFlow | 6743031da633d1ce284a606c49eb00e793c1d729 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/_impl/keras/datasets/imdb.py | 2392863668/tensorFlow | 6743031da633d1ce284a606c49eb00e793c1d729 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/_impl/keras/datasets/imdb.py | 2392863668/tensorFlow | 6743031da633d1ce284a606c49eb00e793c1d729 | [
"Apache-2.0"
] | 1 | 2020-05-08T22:28:00.000Z | 2020-05-08T22:28:00.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IMDB movie review sentiment classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import numpy as np
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.datasets.imdb.load_data')
def load_data(path='imdb.npz',
num_words=None,
skip_top=0,
maxlen=None,
seed=113,
start_char=1,
oov_char=2,
index_from=3):
"""Loads the IMDB dataset.
Arguments:
path: where to cache the data (relative to `~/.keras/dataset`).
num_words: max number of words to include. Words are ranked
by how often they occur (in the training set) and only
the most frequent words are kept
skip_top: skip the top N most frequently occurring words
(which may not be informative).
maxlen: sequences longer than this will be filtered out.
seed: random seed for sample shuffling.
start_char: The start of a sequence will be marked with this character.
Set to 1 because 0 is usually the padding character.
oov_char: words that were cut out because of the `num_words`
or `skip_top` limit will be replaced with this character.
index_from: index actual words with this index and higher.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
Raises:
ValueError: in case `maxlen` is so low
that no input sequence could be kept.
Note that the 'out of vocabulary' character is only used for
words that were present in the training set but are not included
because they're not making the `num_words` cut here.
Words that were not seen in the training set but are in the test set
have simply been skipped.
"""
path = get_file(
path,
origin='https://s3.amazonaws.com/text-datasets/imdb.npz',
file_hash='599dadb1135973df5b59232a0e9a887c')
f = np.load(path)
x_train, labels_train = f['x_train'], f['y_train']
x_test, labels_test = f['x_test'], f['y_test']
f.close()
np.random.seed(seed)
indices = np.arange(len(x_train))
np.random.shuffle(indices)
x_train = x_train[indices]
labels_train = labels_train[indices]
indices = np.arange(len(x_test))
np.random.shuffle(indices)
x_test = x_test[indices]
labels_test = labels_test[indices]
xs = np.concatenate([x_train, x_test])
labels = np.concatenate([labels_train, labels_test])
if start_char is not None:
xs = [[start_char] + [w + index_from for w in x] for x in xs]
elif index_from:
xs = [[w + index_from for w in x] for x in xs]
if maxlen:
new_xs = []
new_labels = []
for x, y in zip(xs, labels):
if len(x) < maxlen:
new_xs.append(x)
new_labels.append(y)
xs = new_xs
labels = new_labels
if not xs:
raise ValueError('After filtering for sequences shorter than maxlen=' +
str(maxlen) + ', no sequence was kept. '
'Increase maxlen.')
if not num_words:
num_words = max([max(x) for x in xs])
# by convention, use 2 as OOV word
# reserve 'index_from' (=3 by default) characters:
# 0 (padding), 1 (start), 2 (OOV)
if oov_char is not None:
xs = [[oov_char if (w >= num_words or w < skip_top) else w for w in x]
for x in xs]
else:
new_xs = []
for x in xs:
nx = []
for w in x:
if skip_top <= w < num_words:
nx.append(w)
new_xs.append(nx)
xs = new_xs
x_train = np.array(xs[:len(x_train)])
y_train = np.array(labels[:len(x_train)])
x_test = np.array(xs[len(x_train):])
y_test = np.array(labels[len(x_train):])
return (x_train, y_train), (x_test, y_test)
@tf_export('keras.datasets.imdb.get_word_index')
def get_word_index(path='imdb_word_index.json'):
"""Retrieves the dictionary mapping word indices back to words.
Arguments:
path: where to cache the data (relative to `~/.keras/dataset`).
Returns:
The word index dictionary.
"""
path = get_file(
path,
origin='https://s3.amazonaws.com/text-datasets/imdb_word_index.json')
f = open(path)
data = json.load(f)
f.close()
return data
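# A minimal usage sketch (illustrative only; the keyword value is arbitrary):
#
#   (x_train, y_train), (x_test, y_test) = load_data(num_words=10000)
#   word_index = get_word_index()
#   # word indices in x_train are offset by index_from (3 by default), with
#   # 1 marking the start of a sequence and 2 marking out-of-vocabulary words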
| 32.754839 | 80 | 0.659445 |
7946c4e03d0d885474fc249354dd123c3ed4da28 | 3,142 | py | Python | contextmonkey/tracelayer/processingfilters/CabspottingLocationModify.py | manojrege/contextmonkey | 9974889a726d7f60c6da0d6ccab97113ce635a14 | [
"BSD-3-Clause"
] | 4 | 2017-03-17T02:28:12.000Z | 2019-04-18T11:25:44.000Z | contextmonkey/tracelayer/processingfilters/CabspottingLocationModify.py | manojrege/contextmonkey | 9974889a726d7f60c6da0d6ccab97113ce635a14 | [
"BSD-3-Clause"
] | null | null | null | contextmonkey/tracelayer/processingfilters/CabspottingLocationModify.py | manojrege/contextmonkey | 9974889a726d7f60c6da0d6ccab97113ce635a14 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Technische Universität Berlin
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of the Technische Universitaet Berlin nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Cabspotting location data processing filter."""
__author__ = "Manoj R. Rege"
__copyright__ = "Copyright (c) 2017, Technische Universität Berlin"
__credits__ = ["Manoj R. Rege"]
__version__ = "1.0"
__maintainer__ = "Manoj R. Rege"
__email__ = "[email protected]"
__status__ = "Prototype"
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
sys.path.append('../../../')
from zope.interface import implementer
from contextmonkey.tracelayer.interfaces.ITraceModify import ITraceModify
from contextmonkey.ContextMonkeyLogger import tracelayer
@implementer(ITraceModify)
class CabspottingLocationModify(object):
"""Class providing implementation of ITraceModfy for Cabspotting Location traces."""
filterattributes = None
def __init__(self):
"""Initialize."""
self.tracedatalist = []
def filterTrace(self, tracedata, uuid, **kwargs):
"""Remove unwanted attributes from the tracedata."""
return tracedata
def handlefailure(self, dummy=None):
"""Handle failure of fetching trace, by generating random values."""
pass
def processTrace(self,tracedatalist, uuid, **kwargs):
"""Perform trace processing."""
tracelayer.log("CABSPOTTINGLOCATIONMODIFY-processTrace",str(tracedatalist))
tracedata = tracedatalist[0].split(' ')
tempdict={'latitude':float(tracedata[0]), 'longitude':float(tracedata[1]), 'altitude':float(tracedata[2])}
return tempdict
| 41.893333 | 114 | 0.744749 |
7946c643243295ec05241863bf2cdcb237f97700 | 11,253 | py | Python | env/lib/python3.7/site-packages/numpy/doc/basics.py | MarcoMancha/BreastCancerDetector | be0dfdcebd1ae66da6d0cf48e2525c24942ae877 | [
"Apache-2.0"
] | 99 | 2019-10-09T16:14:46.000Z | 2022-03-17T02:23:47.000Z | env/lib/python3.7/site-packages/numpy/doc/basics.py | MarcoMancha/BreastCancerDetector | be0dfdcebd1ae66da6d0cf48e2525c24942ae877 | [
"Apache-2.0"
] | 26 | 2020-03-24T18:07:06.000Z | 2022-03-12T00:12:27.000Z | env/lib/python3.7/site-packages/numpy/doc/basics.py | MarcoMancha/BreastCancerDetector | be0dfdcebd1ae66da6d0cf48e2525c24942ae877 | [
"Apache-2.0"
] | 98 | 2019-10-17T14:48:28.000Z | 2022-01-21T03:33:38.000Z | """
============
Array basics
============
Array types and conversions between types
=========================================
NumPy supports a much greater variety of numerical types than Python does.
This section shows which are available, and how to modify an array's data-type.
The primitive types supported are tied closely to those in C:
.. list-table::
:header-rows: 1
* - Numpy type
- C type
- Description
* - `np.bool`
- ``bool``
- Boolean (True or False) stored as a byte
* - `np.byte`
- ``signed char``
- Platform-defined
* - `np.ubyte`
- ``unsigned char``
- Platform-defined
* - `np.short`
- ``short``
- Platform-defined
* - `np.ushort`
- ``unsigned short``
- Platform-defined
* - `np.intc`
- ``int``
- Platform-defined
* - `np.uintc`
- ``unsigned int``
- Platform-defined
* - `np.int_`
- ``long``
- Platform-defined
* - `np.uint`
- ``unsigned long``
- Platform-defined
* - `np.longlong`
- ``long long``
- Platform-defined
* - `np.ulonglong`
- ``unsigned long long``
- Platform-defined
* - `np.half` / `np.float16`
-
- Half precision float:
sign bit, 5 bits exponent, 10 bits mantissa
* - `np.single`
- ``float``
- Platform-defined single precision float:
typically sign bit, 8 bits exponent, 23 bits mantissa
* - `np.double`
- ``double``
- Platform-defined double precision float:
typically sign bit, 11 bits exponent, 52 bits mantissa.
* - `np.longdouble`
- ``long double``
- Platform-defined extended-precision float
* - `np.csingle`
- ``float complex``
- Complex number, represented by two single-precision floats (real and imaginary components)
* - `np.cdouble`
- ``double complex``
- Complex number, represented by two double-precision floats (real and imaginary components).
* - `np.clongdouble`
- ``long double complex``
- Complex number, represented by two extended-precision floats (real and imaginary components).
Since many of these have platform-dependent definitions, a set of fixed-size
aliases are provided:
.. list-table::
:header-rows: 1
* - Numpy type
- C type
- Description
* - `np.int8`
- ``int8_t``
- Byte (-128 to 127)
* - `np.int16`
- ``int16_t``
- Integer (-32768 to 32767)
* - `np.int32`
- ``int32_t``
- Integer (-2147483648 to 2147483647)
* - `np.int64`
- ``int64_t``
- Integer (-9223372036854775808 to 9223372036854775807)
* - `np.uint8`
- ``uint8_t``
- Unsigned integer (0 to 255)
* - `np.uint16`
- ``uint16_t``
- Unsigned integer (0 to 65535)
* - `np.uint32`
- ``uint32_t``
- Unsigned integer (0 to 4294967295)
* - `np.uint64`
- ``uint64_t``
- Unsigned integer (0 to 18446744073709551615)
* - `np.intp`
- ``intptr_t``
- Integer used for indexing, typically the same as ``ssize_t``
* - `np.uintp`
- ``uintptr_t``
- Integer large enough to hold a pointer
* - `np.float32`
- ``float``
-
* - `np.float64` / `np.float_`
- ``double``
- Note that this matches the precision of the builtin python `float`.
* - `np.complex64`
- ``float complex``
- Complex number, represented by two 32-bit floats (real and imaginary components)
* - `np.complex128` / `np.complex_`
- ``double complex``
- Note that this matches the precision of the builtin python `complex`.
NumPy numerical types are instances of ``dtype`` (data-type) objects, each
having unique characteristics. Once you have imported NumPy using
::
>>> import numpy as np
the dtypes are available as ``np.bool_``, ``np.float32``, etc.
Advanced types, not listed in the table above, are explored in
section :ref:`structured_arrays`.
There are 5 basic numerical types representing booleans (bool), integers (int),
unsigned integers (uint) floating point (float) and complex. Those with numbers
in their name indicate the bitsize of the type (i.e. how many bits are needed
to represent a single value in memory). Some types, such as ``int`` and
``intp``, have differing bitsizes, dependent on the platforms (e.g. 32-bit
vs. 64-bit machines). This should be taken into account when interfacing
with low-level code (such as C or Fortran) where the raw memory is addressed.
Data-types can be used as functions to convert python numbers to array scalars
(see the array scalar section for an explanation), python sequences of numbers
to arrays of that type, or as arguments to the dtype keyword that many numpy
functions or methods accept. Some examples::
>>> import numpy as np
>>> x = np.float32(1.0)
>>> x
1.0
>>> y = np.int_([1,2,4])
>>> y
array([1, 2, 4])
>>> z = np.arange(3, dtype=np.uint8)
>>> z
array([0, 1, 2], dtype=uint8)
Array types can also be referred to by character codes, mostly to retain
backward compatibility with older packages such as Numeric. Some
documentation may still refer to these, for example::
>>> np.array([1, 2, 3], dtype='f')
array([ 1., 2., 3.], dtype=float32)
We recommend using dtype objects instead.
To convert the type of an array, use the .astype() method (preferred) or
the type itself as a function. For example: ::
>>> z.astype(float) #doctest: +NORMALIZE_WHITESPACE
array([ 0., 1., 2.])
>>> np.int8(z)
array([0, 1, 2], dtype=int8)
Note that, above, we use the *Python* float object as a dtype. NumPy knows
that ``int`` refers to ``np.int_``, ``bool`` means ``np.bool_``,
that ``float`` is ``np.float_`` and ``complex`` is ``np.complex_``.
The other data-types do not have Python equivalents.
To determine the type of an array, look at the dtype attribute::
>>> z.dtype
dtype('uint8')
dtype objects also contain information about the type, such as its bit-width
and its byte-order. The data type can also be used indirectly to query
properties of the type, such as whether it is an integer::
>>> d = np.dtype(int)
>>> d
dtype('int32')
>>> np.issubdtype(d, np.integer)
True
>>> np.issubdtype(d, np.floating)
False
Array Scalars
=============
NumPy generally returns elements of arrays as array scalars (a scalar
with an associated dtype). Array scalars differ from Python scalars, but
for the most part they can be used interchangeably (the primary
exception is for versions of Python older than v2.x, where integer array
scalars cannot act as indices for lists and tuples). There are some
exceptions, such as when code requires very specific attributes of a scalar
or when it checks specifically whether a value is a Python scalar. Generally,
problems are easily fixed by explicitly converting array scalars
to Python scalars, using the corresponding Python type function
(e.g., ``int``, ``float``, ``complex``, ``str``, ``unicode``).
The primary advantage of using array scalars is that
they preserve the array type (Python may not have a matching scalar type
available, e.g. ``int16``). Therefore, the use of array scalars ensures
identical behaviour between arrays and scalars, irrespective of whether the
value is inside an array or not. NumPy scalars also have many of the same
methods arrays do.
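A short illustrative sketch of moving between array scalars and plain Python
scalars (the exact output formatting may vary slightly between NumPy
versions)::
    >>> x = np.float32(1.5)
    >>> float(x)       # explicit conversion to a Python float
    1.5
    >>> x.item()       # .item() returns the closest matching Python type
    1.5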
Overflow Errors
===============
The fixed size of NumPy numeric types may cause overflow errors when a value
requires more memory than available in the data type. For example,
`numpy.power` evaluates ``100 * 10 ** 8`` correctly for 64-bit integers,
but gives 1874919424 (incorrect) for a 32-bit integer.
>>> np.power(100, 8, dtype=np.int64)
10000000000000000
>>> np.power(100, 8, dtype=np.int32)
1874919424
The behaviour of NumPy and Python integer types differs significantly for
integer overflows and may confuse users expecting NumPy integers to behave
similar to Python's ``int``. Unlike NumPy, the size of Python's ``int`` is
flexible. This means Python integers may expand to accommodate any integer and
will not overflow.
NumPy provides `numpy.iinfo` and `numpy.finfo` to verify the
minimum or maximum values of NumPy integer and floating point values
respectively ::
>>> np.iinfo(np.int) # Bounds of the default integer on this system.
iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
>>> np.iinfo(np.int32) # Bounds of a 32-bit integer
iinfo(min=-2147483648, max=2147483647, dtype=int32)
>>> np.iinfo(np.int64) # Bounds of a 64-bit integer
iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
If 64-bit integers are still too small the result may be cast to a
floating point number. Floating point numbers offer a larger, but inexact,
range of possible values.
>>> np.power(100, 100, dtype=np.int64) # Incorrect even with 64-bit int
0
>>> np.power(100, 100, dtype=np.float64)
1e+200
Extended Precision
==================
Python's floating-point numbers are usually 64-bit floating-point numbers,
nearly equivalent to ``np.float64``. In some unusual situations it may be
useful to use floating-point numbers with more precision. Whether this
is possible in numpy depends on the hardware and on the development
environment: specifically, x86 machines provide hardware floating-point
with 80-bit precision, and while most C compilers provide this as their
``long double`` type, MSVC (standard for Windows builds) makes
``long double`` identical to ``double`` (64 bits). NumPy makes the
compiler's ``long double`` available as ``np.longdouble`` (and
``np.clongdouble`` for the complex numbers). You can find out what your
numpy provides with ``np.finfo(np.longdouble)``.
NumPy does not provide a dtype with more precision than C
``long double``\\s; in particular, the 128-bit IEEE quad precision
data type (FORTRAN's ``REAL*16``\\) is not available.
For efficient memory alignment, ``np.longdouble`` is usually stored
padded with zero bits, either to 96 or 128 bits. Which is more efficient
depends on hardware and development environment; typically on 32-bit
systems they are padded to 96 bits, while on 64-bit systems they are
typically padded to 128 bits. ``np.longdouble`` is padded to the system
default; ``np.float96`` and ``np.float128`` are provided for users who
want specific padding. In spite of the names, ``np.float96`` and
``np.float128`` provide only as much precision as ``np.longdouble``,
that is, 80 bits on most x86 machines and 64 bits in standard
Windows builds.
Be warned that even if ``np.longdouble`` offers more precision than
python ``float``, it is easy to lose that extra precision, since
python often forces values to pass through ``float``. For example,
the ``%`` formatting operator requires its arguments to be converted
to standard python types, and it is therefore impossible to preserve
extended precision even if many decimal places are requested. It can
be useful to test your code with the value
``1 + np.finfo(np.longdouble).eps``.
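As a quick, platform-dependent check of what ``np.longdouble`` offers (the
value below is only illustrative; on standard Windows builds the eps matches
that of ``np.float64``)::
    >>> np.finfo(np.longdouble).eps   # doctest: +SKIP
    1.084202172485504434e-19
    >>> np.longdouble(1) + np.finfo(np.longdouble).eps > 1   # doctest: +SKIP
    True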
"""
from __future__ import division, absolute_import, print_function
| 32.80758 | 101 | 0.674842 |
7946c6e34069235a2492b459209f6b873cba1deb | 82 | py | Python | src/pyassets/__init__.py | skillfulhacks/xv | 861419a38e955eb841b7a25f0924f5a2dc2816a9 | [
"MIT"
] | null | null | null | src/pyassets/__init__.py | skillfulhacks/xv | 861419a38e955eb841b7a25f0924f5a2dc2816a9 | [
"MIT"
] | null | null | null | src/pyassets/__init__.py | skillfulhacks/xv | 861419a38e955eb841b7a25f0924f5a2dc2816a9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Program Python Assets #
#
#
#
#
| 9.111111 | 25 | 0.54878 |
7946c7275acf28ea31247cb18685343eab5753d7 | 4,129 | py | Python | mergify_engine/logs.py | nafg/mergify-engine | 7fac6f561a0cd3481a89f2aa19cd8d4d0e39d7e8 | [
"Apache-2.0"
] | null | null | null | mergify_engine/logs.py | nafg/mergify-engine | 7fac6f561a0cd3481a89f2aa19cd8d4d0e39d7e8 | [
"Apache-2.0"
] | null | null | null | mergify_engine/logs.py | nafg/mergify-engine | 7fac6f561a0cd3481a89f2aa19cd8d4d0e39d7e8 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextvars
import logging
import os
import re
import sys
import daiquiri
import daiquiri.formatter
import ddtrace
from mergify_engine import config
LOG = daiquiri.getLogger(__name__)
logging.addLevelName(42, "TEST")
LEVEL_COLORS = daiquiri.formatter.ColorFormatter.LEVEL_COLORS.copy()
LEVEL_COLORS[42] = "\033[01;35m"
WORKER_ID: contextvars.ContextVar[str] = contextvars.ContextVar("worker_id")
class CustomFormatter(daiquiri.formatter.ColorExtrasFormatter): # type: ignore[misc]
LEVEL_COLORS = LEVEL_COLORS
def format(self, record):
if hasattr(record, "_daiquiri_extra_keys"):
record._daiquiri_extra_keys = sorted(record._daiquiri_extra_keys)
return super().format(record)
def add_extras(self, record):
super().add_extras(record)
worker_id = WORKER_ID.get(None)
if worker_id is not None:
record.extras += " " + self.extras_template.format("worker_id", worker_id)
CUSTOM_FORMATTER = CustomFormatter(
fmt="%(asctime)s [%(process)d] %(color)s%(levelname)-8.8s %(name)s: \033[1m%(message)s\033[0m%(extras)s%(color_stop)s"
)
class HerokuDatadogFormatter(daiquiri.formatter.DatadogFormatter): # type: ignore [misc]
HEROKU_LOG_EXTRAS = {
envvar: os.environ[envvar]
for envvar in ("HEROKU_RELEASE_VERSION", "HEROKU_SLUG_COMMIT")
if envvar in os.environ
}
def add_fields(self, log_record, record, message_dict):
super().add_fields(log_record, record, message_dict)
log_record.update(self.HEROKU_LOG_EXTRAS)
log_record.update(ddtrace.tracer.get_log_correlation_context())
worker_id = WORKER_ID.get(None)
if worker_id is not None:
log_record.update({"worker_id": worker_id})
def config_log() -> None:
LOG.info("##################### CONFIGURATION ######################")
for key, value in config.CONFIG.items():
name = str(key)
if (
name == "OAUTH_CLIENT_ID"
or "PRIVATE_KEY" in name
or "TOKEN" in name
or "SECRET" in name
or "PRIVATE_KEY" in name
) and value is not None:
value = "*****"
if "URL" in name and value is not None:
value = re.sub(r"://[^@]*@", "://*****@", value)
LOG.info("* MERGIFYENGINE_%s: %s", name, value)
LOG.info("* PATH: %s", os.environ.get("PATH"))
LOG.info("##########################################################")
def setup_logging(dump_config: bool = True) -> None:
outputs = []
if config.LOG_STDOUT:
outputs.append(
daiquiri.output.Stream(
sys.stdout, level=config.LOG_STDOUT_LEVEL, formatter=CUSTOM_FORMATTER
)
)
if config.LOG_DATADOG:
outputs.append(
daiquiri.output.Datadog(
level=config.LOG_DATADOG_LEVEL,
handler_class=daiquiri.handlers.PlainTextDatagramHandler,
formatter=HerokuDatadogFormatter(),
)
)
daiquiri.setup(
outputs=outputs,
level=config.LOG_LEVEL,
)
daiquiri.set_default_log_levels(
[
("github.Requester", "WARN"),
("urllib3.connectionpool", "WARN"),
("urllib3.util.retry", "WARN"),
("vcr", "WARN"),
("httpx", "WARN"),
("asyncio", "WARN"),
("uvicorn.access", "WARN"),
]
+ [(name, "DEBUG") for name in config.LOG_DEBUG_LOGGER_NAMES]
)
if dump_config:
config_log()
| 31.045113 | 122 | 0.61952 |
7946c7a80f35957b2770f649439be65d040cdf41 | 1,205 | py | Python | python/encrypt.py | mswdwk/code_test_records | 6edda193c8c19607c2021e62b96b8ff0813c7208 | [
"MIT"
] | 1 | 2019-06-06T13:34:24.000Z | 2019-06-06T13:34:24.000Z | python/encrypt.py | mswdwk/code_test_records | 6edda193c8c19607c2021e62b96b8ff0813c7208 | [
"MIT"
] | null | null | null | python/encrypt.py | mswdwk/code_test_records | 6edda193c8c19607c2021e62b96b8ff0813c7208 | [
"MIT"
] | null | null | null | #coding: utf8
import sys
from Crypto.Cipher import AES
from binascii import b2a_hex, a2b_hex
class prpcrypt():
def __init__(self, key):
self.key = key
self.mode = AES.MODE_CBC
    # Encrypt: the plaintext must be a multiple of 16 bytes (the AES block
    # size); if it is not, it is padded up to the next multiple of 16
def encrypt(self, text):
cryptor = AES.new(self.key, self.mode, self.key)
        # The key length must be 16 (AES-128), 24 (AES-192) or 32 (AES-256)
        # bytes; AES-128 is sufficient here
length = 16
count = len(text)
add = length - (count % length)
text = text + ('\0' * add)
self.ciphertext = cryptor.encrypt(text)
        # The raw AES ciphertext is not necessarily ASCII, which can cause
        # problems when printing or saving it, so it is converted to a hex
        # string before being returned
return b2a_hex(self.ciphertext)
    # After decrypting, strip the trailing null-byte padding that was added
    # before encryption
def decrypt(self, text):
cryptor = AES.new(self.key, self.mode, self.key)
plain_text = cryptor.decrypt(a2b_hex(text))
return plain_text.rstrip('\0')
if __name__ == '__main__':
    pc = prpcrypt('keyskeyskeyskeys')  # initialize with a 16-byte key
e = pc.encrypt("00000")
d = pc.decrypt(e)
print (e,d)
e = pc.encrypt("00000000000000000000000000")
d = pc.decrypt(e)
print (e,d)
| 31.710526 | 79 | 0.607469 |
7946c8242c9a52e09c4a4b3f4e9ecaaaa3f96f9e | 7,369 | py | Python | mautic/api.py | sharon-asana/python-mautic | 84e0d81ab223dda91dee72895363b0e86271c113 | [
"MIT"
] | 3 | 2020-11-22T00:07:41.000Z | 2021-03-02T03:32:24.000Z | mautic/api.py | sharon-asana/python-mautic | 84e0d81ab223dda91dee72895363b0e86271c113 | [
"MIT"
] | null | null | null | mautic/api.py | sharon-asana/python-mautic | 84e0d81ab223dda91dee72895363b0e86271c113 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import requests
from requests_oauthlib import OAuth2Session
import json
class MauticOauth2Client(object):
def __init__(
self,
base_url,
client_id,
client_secret=None,
scope=None,
token=None,
token_updater=None
):
"""
:param base_url: str Base URL for Mautic API E.g. `https://<your-domain>.mautic.net`
:param client_id: str Mautic API Public Key
:param client_secret: str Mautic API Secret Key - needed to autorefresh token
:param scope: list|str
:param token: dict with access token data
:param token_updater: function used for token autorefresh.
"""
if scope and not isinstance(scope, (list, tuple)):
scope = scope.split(',')
self.base_url = base_url.strip(' /')
self.access_token_url = base_url + '/oauth/v2/token'
self.authorization_base_url = base_url + '/oauth/v2/authorize'
if token_updater is not None and client_secret is not None:
kwargs = {
'auto_refresh_url': self.access_token_url,
'auto_refresh_kwargs': {
'client_id': client_id,
'client_secret': client_secret
},
'token_updater': token_updater
}
else:
kwargs = {}
self.session = OAuth2Session(
client_id, scope=scope, token=token, **kwargs
)
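# A minimal usage sketch (hypothetical URL, credentials and token storage; the
# save_token callback and stored_token value are assumptions of this example,
# not part of the class):
#
#   def save_token(token):
#       ...  # persist the refreshed token dict for later reuse
#
#   client = MauticOauth2Client(
#       base_url='https://example.mautic.net',
#       client_id='<public key>',
#       client_secret='<secret key>',
#       token=stored_token,
#       token_updater=save_token,
#   )
#   # endpoint wrappers built on the API class below then reuse client.session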
class MauticBasicAuthClient(object):
def __init__(
self,
base_url,
username,
password
):
"""
:param base_url: str Base URL for Mautic API E.g. `https://<your-domain>.mautic.net`
:param username: str Mautic Username
:param password: str Mautic Password
"""
self.base_url = base_url.strip(' /')
self.session = requests.Session()
self.session.auth = (username, password)
class API(object):
_endpoint = ''
def __init__(self, client):
self._client = client
self.endpoint_url = '{base_url}/api/{endpoint}'.format(
base_url=self._client.base_url,
endpoint=self._endpoint.strip(' /')
)
@staticmethod
def process_response(response):
if response.ok:
return response.json()
try:
return response.json()
except ValueError:
# no json object could be decoded
return response.content
@staticmethod
def action_not_supported(action):
"""
Returns a not supported error
:param action: str
:return: dict
"""
return {
'error': {
'code': 500,
'message':
'{action} is not supported at this time'.format(action=action)
}
}
def get(self, obj_id):
"""
Get a single item
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def get_list(
self,
search='',
where={},
start=0,
limit=0,
order_by='',
order_by_dir='ASC',
published_only=False,
minimal=False
):
"""
Get a list of items
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:param published_only: bool
:param minimal: bool
:return: dict|str
"""
parameters = {}
args = ['search', 'start', 'limit', 'minimal', 'where']
for arg in args:
if arg in locals() and locals()[arg]:
if arg == 'where':
for key, val in where.items():
parameters[key] = val
else:
parameters[arg] = locals()[arg]
if order_by:
parameters['orderBy'] = order_by
if order_by_dir:
parameters['orderByDir'] = order_by_dir
if published_only:
parameters['publishedOnly'] = 'true'
response = self._client.session.get(
self.endpoint_url, params=parameters
)
return self.process_response(response)
def get_published_list(
self, search='', start=0, limit=0, order_by='', order_by_dir='ASC'
):
"""
Proxy function to get_list with published_only set to True
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:return: dict|str
"""
return self.get_list(
search=search,
start=start,
limit=limit,
order_by=order_by,
order_by_dir=order_by_dir,
published_only=True
)
def create(self, parameters):
"""
Create a new item (if supported)
:param parameters: dict
:return: dict|str
"""
response = self._client.session.post(
'{url}/new'.format(url=self.endpoint_url), data=parameters
)
return self.process_response(response)
    def create_batch(self, parameters):
        """
        Create multiple items using the batch/new endpoint
        :param parameters: list|dict (list of dictionaries)
"""
data = json.dumps(parameters)
response = self._client.session.post(
'{url}/batch/new'.format(url=self.endpoint_url),
data=data
)
return self.process_response(response)
def edit(self, obj_id, parameters, create_if_not_exists=False):
"""
Edit an item with option to create if it doesn't exist
:param obj_id: int
:param create_if_not_exists: bool
:param parameters: dict
:return: dict|str
"""
if create_if_not_exists:
response = self._client.session.put(
'{url}/{id}/edit'.format(
url=self.endpoint_url, id=obj_id
),
data=parameters
)
else:
response = self._client.session.patch(
'{url}/{id}/edit'.format(
url=self.endpoint_url, id=obj_id
),
data=parameters
)
return self.process_response(response)
    def edit_batch(self, parameters):
        """
        Edit multiple items using the batch/edit endpoint
        :param parameters: list|dict (list of dictionaries)
"""
data = json.dumps(parameters)
response = self._client.session.patch(
'{url}/batch/edit'.format(url=self.endpoint_url),
data=data
)
return self.process_response(response)
def delete(self, obj_id):
"""
Delete an item
:param obj_id: int
:return: dict|str
"""
response = self._client.session.delete(
'{url}/{id}/delete'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
| 27.599251 | 92 | 0.535758 |
7946c9a5ca3d1a4fef50a150f20e6788941010d9 | 51,767 | py | Python | keystone/tests/unit/test_v3_resource.py | whitepages/keystone | 7a0874f6f69852584061fa384f75dfb0d5f1c229 | [
"Apache-2.0"
] | null | null | null | keystone/tests/unit/test_v3_resource.py | whitepages/keystone | 7a0874f6f69852584061fa384f75dfb0d5f1c229 | [
"Apache-2.0"
] | null | null | null | keystone/tests/unit/test_v3_resource.py | whitepages/keystone | 7a0874f6f69852584061fa384f75dfb0d5f1c229 | [
"Apache-2.0"
] | 1 | 2021-03-21T11:38:30.000Z | 2021-03-21T11:38:30.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import cfg
from six.moves import http_client
from six.moves import range
from keystone.common import controller
from keystone import exception
from keystone.tests import unit
from keystone.tests.unit import test_v3
CONF = cfg.CONF
class ResourceTestCase(test_v3.RestfulTestCase,
test_v3.AssignmentTestMixin):
"""Test domains and projects."""
# Domain CRUD tests
def test_create_domain(self):
"""Call ``POST /domains``."""
ref = unit.new_domain_ref()
r = self.post(
'/domains',
body={'domain': ref})
return self.assertValidDomainResponse(r, ref)
def test_create_domain_case_sensitivity(self):
"""Call `POST /domains`` twice with upper() and lower() cased name."""
ref = unit.new_domain_ref()
# ensure the name is lowercase
ref['name'] = ref['name'].lower()
r = self.post(
'/domains',
body={'domain': ref})
self.assertValidDomainResponse(r, ref)
# ensure the name is uppercase
ref['name'] = ref['name'].upper()
r = self.post(
'/domains',
body={'domain': ref})
self.assertValidDomainResponse(r, ref)
def test_create_domain_bad_request(self):
"""Call ``POST /domains``."""
self.post('/domains', body={'domain': {}},
expected_status=http_client.BAD_REQUEST)
def test_create_domain_unsafe(self):
"""Call ``POST /domains with unsafe names``."""
unsafe_name = 'i am not / safe'
self.config_fixture.config(group='resource',
domain_name_url_safe='off')
ref = unit.new_domain_ref(name=unsafe_name)
self.post(
'/domains',
body={'domain': ref})
for config_setting in ['new', 'strict']:
self.config_fixture.config(group='resource',
domain_name_url_safe=config_setting)
ref = unit.new_domain_ref(name=unsafe_name)
self.post(
'/domains',
body={'domain': ref},
expected_status=http_client.BAD_REQUEST)
def test_create_domain_unsafe_default(self):
"""Check default for unsafe names for ``POST /domains``."""
unsafe_name = 'i am not / safe'
# By default, we should be able to create unsafe names
ref = unit.new_domain_ref(name=unsafe_name)
self.post(
'/domains',
body={'domain': ref})
def test_list_domains(self):
"""Call ``GET /domains``."""
resource_url = '/domains'
r = self.get(resource_url)
self.assertValidDomainListResponse(r, ref=self.domain,
resource_url=resource_url)
def test_get_domain(self):
"""Call ``GET /domains/{domain_id}``."""
r = self.get('/domains/%(domain_id)s' % {
'domain_id': self.domain_id})
self.assertValidDomainResponse(r, self.domain)
def test_update_domain(self):
"""Call ``PATCH /domains/{domain_id}``."""
ref = unit.new_domain_ref()
del ref['id']
r = self.patch('/domains/%(domain_id)s' % {
'domain_id': self.domain_id},
body={'domain': ref})
self.assertValidDomainResponse(r, ref)
def test_update_domain_unsafe(self):
"""Call ``POST /domains/{domain_id} with unsafe names``."""
unsafe_name = 'i am not / safe'
self.config_fixture.config(group='resource',
domain_name_url_safe='off')
ref = unit.new_domain_ref(name=unsafe_name)
del ref['id']
self.patch('/domains/%(domain_id)s' % {
'domain_id': self.domain_id},
body={'domain': ref})
unsafe_name = 'i am still not / safe'
for config_setting in ['new', 'strict']:
self.config_fixture.config(group='resource',
domain_name_url_safe=config_setting)
ref = unit.new_domain_ref(name=unsafe_name)
del ref['id']
self.patch('/domains/%(domain_id)s' % {
'domain_id': self.domain_id},
body={'domain': ref},
expected_status=http_client.BAD_REQUEST)
def test_update_domain_unsafe_default(self):
"""Check default for unsafe names for ``POST /domains``."""
unsafe_name = 'i am not / safe'
# By default, we should be able to create unsafe names
ref = unit.new_domain_ref(name=unsafe_name)
del ref['id']
self.patch('/domains/%(domain_id)s' % {
'domain_id': self.domain_id},
body={'domain': ref})
def test_disable_domain(self):
"""Call ``PATCH /domains/{domain_id}`` (set enabled=False)."""
# Create a 2nd set of entities in a 2nd domain
domain2 = unit.new_domain_ref()
self.resource_api.create_domain(domain2['id'], domain2)
project2 = unit.new_project_ref(domain_id=domain2['id'])
self.resource_api.create_project(project2['id'], project2)
user2 = unit.create_user(self.identity_api,
domain_id=domain2['id'],
project_id=project2['id'])
self.assignment_api.add_user_to_project(project2['id'],
user2['id'])
# First check a user in that domain can authenticate..
body = {
'auth': {
'passwordCredentials': {
'userId': user2['id'],
'password': user2['password']
},
'tenantId': project2['id']
}
}
self.admin_request(
path='/v2.0/tokens', method='POST', body=body)
auth_data = self.build_authentication_request(
user_id=user2['id'],
password=user2['password'],
project_id=project2['id'])
self.v3_create_token(auth_data)
# Now disable the domain
domain2['enabled'] = False
r = self.patch('/domains/%(domain_id)s' % {
'domain_id': domain2['id']},
body={'domain': {'enabled': False}})
self.assertValidDomainResponse(r, domain2)
# Make sure the user can no longer authenticate, via
# either API
body = {
'auth': {
'passwordCredentials': {
'userId': user2['id'],
'password': user2['password']
},
'tenantId': project2['id']
}
}
self.admin_request(
path='/v2.0/tokens', method='POST', body=body,
expected_status=http_client.UNAUTHORIZED)
# Try looking up in v3 by name and id
auth_data = self.build_authentication_request(
user_id=user2['id'],
password=user2['password'],
project_id=project2['id'])
self.v3_create_token(auth_data,
expected_status=http_client.UNAUTHORIZED)
auth_data = self.build_authentication_request(
username=user2['name'],
user_domain_id=domain2['id'],
password=user2['password'],
project_id=project2['id'])
self.v3_create_token(auth_data,
expected_status=http_client.UNAUTHORIZED)
def test_delete_enabled_domain_fails(self):
"""Call ``DELETE /domains/{domain_id}`` (when domain enabled)."""
# Try deleting an enabled domain, which should fail
self.delete('/domains/%(domain_id)s' % {
'domain_id': self.domain['id']},
expected_status=exception.ForbiddenAction.code)
def test_delete_domain(self):
"""Call ``DELETE /domains/{domain_id}``.
The sample data set up already has a user and project that is part of
self.domain. Additionally we will create a group and a credential
within it. Since the user we will authenticate with is in this domain,
we create a another set of entities in a second domain. Deleting this
second domain should delete all these new entities. In addition,
all the entities in the regular self.domain should be unaffected
by the delete.
Test Plan:
- Create domain2 and a 2nd set of entities
- Disable domain2
- Delete domain2
- Check entities in domain2 have been deleted
- Check entities in self.domain are unaffected
"""
# Create a group and a credential in the main domain
group = unit.new_group_ref(domain_id=self.domain_id)
group = self.identity_api.create_group(group)
credential = unit.new_credential_ref(user_id=self.user['id'],
project_id=self.project_id)
self.credential_api.create_credential(credential['id'], credential)
# Create a 2nd set of entities in a 2nd domain
domain2 = unit.new_domain_ref()
self.resource_api.create_domain(domain2['id'], domain2)
project2 = unit.new_project_ref(domain_id=domain2['id'])
self.resource_api.create_project(project2['id'], project2)
user2 = unit.new_user_ref(domain_id=domain2['id'],
project_id=project2['id'])
user2 = self.identity_api.create_user(user2)
group2 = unit.new_group_ref(domain_id=domain2['id'])
group2 = self.identity_api.create_group(group2)
credential2 = unit.new_credential_ref(user_id=user2['id'],
project_id=project2['id'])
self.credential_api.create_credential(credential2['id'],
credential2)
# Now disable the new domain and delete it
domain2['enabled'] = False
r = self.patch('/domains/%(domain_id)s' % {
'domain_id': domain2['id']},
body={'domain': {'enabled': False}})
self.assertValidDomainResponse(r, domain2)
self.delete('/domains/%(domain_id)s' % {'domain_id': domain2['id']})
# Check all the domain2 relevant entities are gone
self.assertRaises(exception.DomainNotFound,
self.resource_api.get_domain,
domain2['id'])
self.assertRaises(exception.ProjectNotFound,
self.resource_api.get_project,
project2['id'])
self.assertRaises(exception.GroupNotFound,
self.identity_api.get_group,
group2['id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
user2['id'])
self.assertRaises(exception.CredentialNotFound,
self.credential_api.get_credential,
credential2['id'])
# ...and that all self.domain entities are still here
r = self.resource_api.get_domain(self.domain['id'])
self.assertDictEqual(self.domain, r)
r = self.resource_api.get_project(self.project['id'])
self.assertDictEqual(self.project, r)
r = self.identity_api.get_group(group['id'])
self.assertDictEqual(group, r)
r = self.identity_api.get_user(self.user['id'])
self.user.pop('password')
self.assertDictEqual(self.user, r)
r = self.credential_api.get_credential(credential['id'])
self.assertDictEqual(credential, r)
def test_delete_default_domain(self):
# Need to disable it first.
self.patch('/domains/%(domain_id)s' % {
'domain_id': CONF.identity.default_domain_id},
body={'domain': {'enabled': False}})
self.delete(
'/domains/%(domain_id)s' % {
'domain_id': CONF.identity.default_domain_id})
def test_token_revoked_once_domain_disabled(self):
"""Test token from a disabled domain has been invalidated.
Test that a token that was valid for an enabled domain
becomes invalid once that domain is disabled.
"""
domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
user2 = unit.create_user(self.identity_api,
domain_id=domain['id'])
# build a request body
auth_body = self.build_authentication_request(
user_id=user2['id'],
password=user2['password'])
# sends a request for the user's token
token_resp = self.post('/auth/tokens', body=auth_body)
subject_token = token_resp.headers.get('x-subject-token')
# validates the returned token and it should be valid.
self.head('/auth/tokens',
headers={'x-subject-token': subject_token},
expected_status=http_client.OK)
# now disable the domain
domain['enabled'] = False
url = "/domains/%(domain_id)s" % {'domain_id': domain['id']}
self.patch(url,
body={'domain': {'enabled': False}})
# validates the same token again and it should be 'not found'
# as the domain has already been disabled.
self.head('/auth/tokens',
headers={'x-subject-token': subject_token},
expected_status=http_client.NOT_FOUND)
def test_delete_domain_hierarchy(self):
"""Call ``DELETE /domains/{domain_id}``."""
domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
root_project = unit.new_project_ref(domain_id=domain['id'])
self.resource_api.create_project(root_project['id'], root_project)
leaf_project = unit.new_project_ref(
domain_id=domain['id'],
parent_id=root_project['id'])
self.resource_api.create_project(leaf_project['id'], leaf_project)
# Need to disable it first.
self.patch('/domains/%(domain_id)s' % {
'domain_id': domain['id']},
body={'domain': {'enabled': False}})
self.delete(
'/domains/%(domain_id)s' % {
'domain_id': domain['id']})
self.assertRaises(exception.DomainNotFound,
self.resource_api.get_domain,
domain['id'])
self.assertRaises(exception.ProjectNotFound,
self.resource_api.get_project,
root_project['id'])
self.assertRaises(exception.ProjectNotFound,
self.resource_api.get_project,
leaf_project['id'])
def test_forbid_operations_on_federated_domain(self):
"""Make sure one cannot operate on federated domain.
This includes operations like create, update, delete
        on a domain identified by id and name, where different variations of
        the id 'Federated' are used.
"""
def create_domains():
for variation in ('Federated', 'FEDERATED',
'federated', 'fEderated'):
domain = unit.new_domain_ref()
domain['id'] = variation
yield domain
for domain in create_domains():
self.assertRaises(
AssertionError, self.resource_api.create_domain,
domain['id'], domain)
self.assertRaises(
AssertionError, self.resource_api.update_domain,
domain['id'], domain)
self.assertRaises(
exception.DomainNotFound, self.resource_api.delete_domain,
domain['id'])
# swap 'name' with 'id' and try again, expecting the request to
# gracefully fail
domain['id'], domain['name'] = domain['name'], domain['id']
self.assertRaises(
AssertionError, self.resource_api.create_domain,
domain['id'], domain)
self.assertRaises(
AssertionError, self.resource_api.update_domain,
domain['id'], domain)
self.assertRaises(
exception.DomainNotFound, self.resource_api.delete_domain,
domain['id'])
def test_forbid_operations_on_defined_federated_domain(self):
"""Make sure one cannot operate on a user-defined federated domain.
This includes operations like create, update, delete.
"""
non_default_name = 'beta_federated_domain'
self.config_fixture.config(group='federation',
federated_domain_name=non_default_name)
domain = unit.new_domain_ref(name=non_default_name)
self.assertRaises(AssertionError,
self.resource_api.create_domain,
domain['id'], domain)
self.assertRaises(exception.DomainNotFound,
self.resource_api.delete_domain,
domain['id'])
self.assertRaises(AssertionError,
self.resource_api.update_domain,
domain['id'], domain)
# Project CRUD tests
def test_list_projects(self):
"""Call ``GET /projects``."""
resource_url = '/projects'
r = self.get(resource_url)
self.assertValidProjectListResponse(r, ref=self.project,
resource_url=resource_url)
def test_create_project(self):
"""Call ``POST /projects``."""
ref = unit.new_project_ref(domain_id=self.domain_id)
r = self.post(
'/projects',
body={'project': ref})
self.assertValidProjectResponse(r, ref)
def test_create_project_bad_request(self):
"""Call ``POST /projects``."""
self.post('/projects', body={'project': {}},
expected_status=http_client.BAD_REQUEST)
def test_create_project_invalid_domain_id(self):
"""Call ``POST /projects``."""
ref = unit.new_project_ref(domain_id=uuid.uuid4().hex)
self.post('/projects', body={'project': ref},
expected_status=http_client.BAD_REQUEST)
def test_create_project_unsafe(self):
"""Call ``POST /projects with unsafe names``."""
unsafe_name = 'i am not / safe'
self.config_fixture.config(group='resource',
project_name_url_safe='off')
ref = unit.new_project_ref(name=unsafe_name)
self.post(
'/projects',
body={'project': ref})
for config_setting in ['new', 'strict']:
self.config_fixture.config(group='resource',
project_name_url_safe=config_setting)
ref = unit.new_project_ref(name=unsafe_name)
self.post(
'/projects',
body={'project': ref},
expected_status=http_client.BAD_REQUEST)
def test_create_project_unsafe_default(self):
"""Check default for unsafe names for ``POST /projects``."""
unsafe_name = 'i am not / safe'
# By default, we should be able to create unsafe names
ref = unit.new_project_ref(name=unsafe_name)
self.post(
'/projects',
body={'project': ref})
def test_create_project_is_domain_not_allowed(self):
"""Call ``POST /projects``.
Setting is_domain=True is not supported yet and should raise
NotImplemented.
"""
ref = unit.new_project_ref(domain_id=self.domain_id, is_domain=True)
self.post('/projects',
body={'project': ref},
expected_status=http_client.NOT_IMPLEMENTED)
def test_create_project_with_parent_id_none_and_domain_id_none(self):
"""Call ``POST /projects``."""
# Grant a domain role for the user
collection_url = (
'/domains/%(domain_id)s/users/%(user_id)s/roles' % {
'domain_id': self.domain_id,
'user_id': self.user['id']})
member_url = '%(collection_url)s/%(role_id)s' % {
'collection_url': collection_url,
'role_id': self.role_id}
self.put(member_url)
# Create an authentication request for a domain scoped token
auth = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
domain_id=self.domain_id)
# Without parent_id and domain_id passed as None, the domain_id should
# be normalized to the domain on the token, when using a domain
# scoped token.
ref = unit.new_project_ref()
r = self.post(
'/projects',
auth=auth,
body={'project': ref})
ref['domain_id'] = self.domain['id']
self.assertValidProjectResponse(r, ref)
def test_create_project_without_parent_id_and_without_domain_id(self):
"""Call ``POST /projects``."""
# Grant a domain role for the user
collection_url = (
'/domains/%(domain_id)s/users/%(user_id)s/roles' % {
'domain_id': self.domain_id,
'user_id': self.user['id']})
member_url = '%(collection_url)s/%(role_id)s' % {
'collection_url': collection_url,
'role_id': self.role_id}
self.put(member_url)
# Create an authentication request for a domain scoped token
auth = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
domain_id=self.domain_id)
# Without domain_id and parent_id, the domain_id should be
# normalized to the domain on the token, when using a domain
# scoped token.
ref = unit.new_project_ref()
r = self.post(
'/projects',
auth=auth,
body={'project': ref})
ref['domain_id'] = self.domain['id']
self.assertValidProjectResponse(r, ref)
def test_create_project_with_parent_id_and_no_domain_id(self):
"""Call ``POST /projects``."""
# With only the parent_id, the domain_id should be
# normalized to the parent's domain_id
ref_child = unit.new_project_ref(parent_id=self.project['id'])
r = self.post(
'/projects',
body={'project': ref_child})
self.assertEqual(r.result['project']['domain_id'],
self.project['domain_id'])
ref_child['domain_id'] = self.domain['id']
self.assertValidProjectResponse(r, ref_child)
def _create_projects_hierarchy(self, hierarchy_size=1):
"""Creates a single-branched project hierarchy with the specified size.
:param hierarchy_size: the desired hierarchy size, default is 1 -
a project with one child.
:returns projects: a list of the projects in the created hierarchy.
"""
new_ref = unit.new_project_ref(domain_id=self.domain_id)
resp = self.post('/projects', body={'project': new_ref})
projects = [resp.result]
for i in range(hierarchy_size):
new_ref = unit.new_project_ref(
domain_id=self.domain_id,
parent_id=projects[i]['project']['id'])
resp = self.post('/projects',
body={'project': new_ref})
self.assertValidProjectResponse(resp, new_ref)
projects.append(resp.result)
return projects
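    # Illustrative helper (added for exposition, not part of the original test
    # suite): the list returned by _create_projects_hierarchy holds raw response
    # bodies, so the ids of the chain can be read like this. The method name is
    # an assumption introduced only for this sketch.
    def _project_ids_in_hierarchy(self, hierarchy_size=2):
        projects = self._create_projects_hierarchy(hierarchy_size)
        # projects[0] is the root, projects[-1] is the deepest leaf
        return [p['project']['id'] for p in projects]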
def test_list_projects_filtering_by_parent_id(self):
"""Call ``GET /projects?parent_id={project_id}``."""
projects = self._create_projects_hierarchy(hierarchy_size=2)
# Add another child to projects[1] - it will be projects[3]
new_ref = unit.new_project_ref(
domain_id=self.domain_id,
parent_id=projects[1]['project']['id'])
resp = self.post('/projects',
body={'project': new_ref})
self.assertValidProjectResponse(resp, new_ref)
projects.append(resp.result)
# Query for projects[0] immediate children - it will
# be only projects[1]
r = self.get(
'/projects?parent_id=%(project_id)s' % {
'project_id': projects[0]['project']['id']})
self.assertValidProjectListResponse(r)
projects_result = r.result['projects']
expected_list = [projects[1]['project']]
# projects[0] has projects[1] as child
self.assertEqual(expected_list, projects_result)
# Query for projects[1] immediate children - it will
# be projects[2] and projects[3]
r = self.get(
'/projects?parent_id=%(project_id)s' % {
'project_id': projects[1]['project']['id']})
self.assertValidProjectListResponse(r)
projects_result = r.result['projects']
expected_list = [projects[2]['project'], projects[3]['project']]
# projects[1] has projects[2] and projects[3] as children
self.assertEqual(expected_list, projects_result)
# Query for projects[2] immediate children - it will be an empty list
r = self.get(
'/projects?parent_id=%(project_id)s' % {
'project_id': projects[2]['project']['id']})
self.assertValidProjectListResponse(r)
projects_result = r.result['projects']
expected_list = []
# projects[2] has no child, projects_result must be an empty list
self.assertEqual(expected_list, projects_result)
def test_create_hierarchical_project(self):
"""Call ``POST /projects``."""
self._create_projects_hierarchy()
def test_get_project(self):
"""Call ``GET /projects/{project_id}``."""
r = self.get(
'/projects/%(project_id)s' % {
'project_id': self.project_id})
self.assertValidProjectResponse(r, self.project)
def test_get_project_with_parents_as_list_with_invalid_id(self):
"""Call ``GET /projects/{project_id}?parents_as_list``."""
self.get('/projects/%(project_id)s?parents_as_list' % {
'project_id': None}, expected_status=http_client.NOT_FOUND)
self.get('/projects/%(project_id)s?parents_as_list' % {
'project_id': uuid.uuid4().hex},
expected_status=http_client.NOT_FOUND)
def test_get_project_with_subtree_as_list_with_invalid_id(self):
"""Call ``GET /projects/{project_id}?subtree_as_list``."""
self.get('/projects/%(project_id)s?subtree_as_list' % {
'project_id': None}, expected_status=http_client.NOT_FOUND)
self.get('/projects/%(project_id)s?subtree_as_list' % {
'project_id': uuid.uuid4().hex},
expected_status=http_client.NOT_FOUND)
def test_get_project_with_parents_as_ids(self):
"""Call ``GET /projects/{project_id}?parents_as_ids``."""
projects = self._create_projects_hierarchy(hierarchy_size=2)
# Query for projects[2] parents_as_ids
r = self.get(
'/projects/%(project_id)s?parents_as_ids' % {
'project_id': projects[2]['project']['id']})
self.assertValidProjectResponse(r, projects[2]['project'])
parents_as_ids = r.result['project']['parents']
# Assert parents_as_ids is a structured dictionary correctly
# representing the hierarchy. The request was made using projects[2]
# id, hence its parents should be projects[1] and projects[0]. It
# should have the following structure:
# {
# projects[1]: {
# projects[0]: None
# }
# }
expected_dict = {
projects[1]['project']['id']: {
projects[0]['project']['id']: None
}
}
self.assertDictEqual(expected_dict, parents_as_ids)
# Query for projects[0] parents_as_ids
r = self.get(
'/projects/%(project_id)s?parents_as_ids' % {
'project_id': projects[0]['project']['id']})
self.assertValidProjectResponse(r, projects[0]['project'])
parents_as_ids = r.result['project']['parents']
# projects[0] has no parents, parents_as_ids must be None
self.assertIsNone(parents_as_ids)
def test_get_project_with_parents_as_list_with_full_access(self):
"""``GET /projects/{project_id}?parents_as_list`` with full access.
Test plan:
- Create 'parent', 'project' and 'subproject' projects;
- Assign a user a role on each one of those projects;
- Check that calling parents_as_list on 'subproject' returns both
'project' and 'parent'.
"""
# Create the project hierarchy
parent, project, subproject = self._create_projects_hierarchy(2)
# Assign a role for the user on all the created projects
for proj in (parent, project, subproject):
self.put(self.build_role_assignment_link(
role_id=self.role_id, user_id=self.user_id,
project_id=proj['project']['id']))
# Make the API call
r = self.get('/projects/%(project_id)s?parents_as_list' %
{'project_id': subproject['project']['id']})
self.assertValidProjectResponse(r, subproject['project'])
# Assert only 'project' and 'parent' are in the parents list
self.assertIn(project, r.result['project']['parents'])
self.assertIn(parent, r.result['project']['parents'])
self.assertEqual(2, len(r.result['project']['parents']))
def test_get_project_with_parents_as_list_with_partial_access(self):
"""``GET /projects/{project_id}?parents_as_list`` with partial access.
Test plan:
- Create 'parent', 'project' and 'subproject' projects;
- Assign a user a role on 'parent' and 'subproject';
- Check that calling parents_as_list on 'subproject' only returns
'parent'.
"""
# Create the project hierarchy
parent, project, subproject = self._create_projects_hierarchy(2)
# Assign a role for the user on parent and subproject
for proj in (parent, subproject):
self.put(self.build_role_assignment_link(
role_id=self.role_id, user_id=self.user_id,
project_id=proj['project']['id']))
# Make the API call
r = self.get('/projects/%(project_id)s?parents_as_list' %
{'project_id': subproject['project']['id']})
self.assertValidProjectResponse(r, subproject['project'])
# Assert only 'parent' is in the parents list
self.assertIn(parent, r.result['project']['parents'])
self.assertEqual(1, len(r.result['project']['parents']))
def test_get_project_with_parents_as_list_and_parents_as_ids(self):
"""Attempt to list a project's parents as both a list and as IDs.
This uses ``GET /projects/{project_id}?parents_as_list&parents_as_ids``
which should fail with a Bad Request due to the conflicting query
strings.
"""
projects = self._create_projects_hierarchy(hierarchy_size=2)
self.get(
'/projects/%(project_id)s?parents_as_list&parents_as_ids' % {
'project_id': projects[1]['project']['id']},
expected_status=http_client.BAD_REQUEST)
def test_get_project_with_subtree_as_ids(self):
"""Call ``GET /projects/{project_id}?subtree_as_ids``.
This test creates a more complex hierarchy to test if the structured
dictionary returned by using the ``subtree_as_ids`` query param
correctly represents the hierarchy.
The hierarchy contains 5 projects with the following structure::
+--A--+
| |
+--B--+ C
| |
D E
"""
projects = self._create_projects_hierarchy(hierarchy_size=2)
# Add another child to projects[0] - it will be projects[3]
new_ref = unit.new_project_ref(
domain_id=self.domain_id,
parent_id=projects[0]['project']['id'])
resp = self.post('/projects',
body={'project': new_ref})
self.assertValidProjectResponse(resp, new_ref)
projects.append(resp.result)
# Add another child to projects[1] - it will be projects[4]
new_ref = unit.new_project_ref(
domain_id=self.domain_id,
parent_id=projects[1]['project']['id'])
resp = self.post('/projects',
body={'project': new_ref})
self.assertValidProjectResponse(resp, new_ref)
projects.append(resp.result)
# Query for projects[0] subtree_as_ids
r = self.get(
'/projects/%(project_id)s?subtree_as_ids' % {
'project_id': projects[0]['project']['id']})
self.assertValidProjectResponse(r, projects[0]['project'])
subtree_as_ids = r.result['project']['subtree']
# The subtree hierarchy from projects[0] should have the following
# structure:
# {
# projects[1]: {
# projects[2]: None,
# projects[4]: None
# },
# projects[3]: None
# }
expected_dict = {
projects[1]['project']['id']: {
projects[2]['project']['id']: None,
projects[4]['project']['id']: None
},
projects[3]['project']['id']: None
}
self.assertDictEqual(expected_dict, subtree_as_ids)
# Now query for projects[1] subtree_as_ids
r = self.get(
'/projects/%(project_id)s?subtree_as_ids' % {
'project_id': projects[1]['project']['id']})
self.assertValidProjectResponse(r, projects[1]['project'])
subtree_as_ids = r.result['project']['subtree']
# The subtree hierarchy from projects[1] should have the following
# structure:
# {
# projects[2]: None,
# projects[4]: None
# }
expected_dict = {
projects[2]['project']['id']: None,
projects[4]['project']['id']: None
}
self.assertDictEqual(expected_dict, subtree_as_ids)
# Now query for projects[3] subtree_as_ids
r = self.get(
'/projects/%(project_id)s?subtree_as_ids' % {
'project_id': projects[3]['project']['id']})
self.assertValidProjectResponse(r, projects[3]['project'])
subtree_as_ids = r.result['project']['subtree']
# projects[3] has no subtree, subtree_as_ids must be None
self.assertIsNone(subtree_as_ids)
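    # Sketch (added for exposition, not in the original file): the five-project
    # tree drawn in the docstring above, built explicitly. The method name is an
    # assumption introduced only for this example.
    def _build_five_project_tree(self):
        # _create_projects_hierarchy(2) yields the A - B - D chain
        projects = self._create_projects_hierarchy(hierarchy_size=2)
        for parent_idx in (0, 1):  # add C under A, then E under B
            ref = unit.new_project_ref(
                domain_id=self.domain_id,
                parent_id=projects[parent_idx]['project']['id'])
            resp = self.post('/projects', body={'project': ref})
            projects.append(resp.result)
        return projects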
def test_get_project_with_subtree_as_list_with_full_access(self):
"""``GET /projects/{project_id}?subtree_as_list`` with full access.
Test plan:
- Create 'parent', 'project' and 'subproject' projects;
- Assign a user a role on each one of those projects;
        - Check that calling subtree_as_list on 'parent' returns both 'project'
          and 'subproject'.
"""
# Create the project hierarchy
parent, project, subproject = self._create_projects_hierarchy(2)
# Assign a role for the user on all the created projects
for proj in (parent, project, subproject):
self.put(self.build_role_assignment_link(
role_id=self.role_id, user_id=self.user_id,
project_id=proj['project']['id']))
# Make the API call
r = self.get('/projects/%(project_id)s?subtree_as_list' %
{'project_id': parent['project']['id']})
self.assertValidProjectResponse(r, parent['project'])
# Assert only 'project' and 'subproject' are in the subtree
self.assertIn(project, r.result['project']['subtree'])
self.assertIn(subproject, r.result['project']['subtree'])
self.assertEqual(2, len(r.result['project']['subtree']))
def test_get_project_with_subtree_as_list_with_partial_access(self):
"""``GET /projects/{project_id}?subtree_as_list`` with partial access.
Test plan:
- Create 'parent', 'project' and 'subproject' projects;
- Assign a user a role on 'parent' and 'subproject';
- Check that calling subtree_as_list on 'parent' returns 'subproject'.
"""
# Create the project hierarchy
parent, project, subproject = self._create_projects_hierarchy(2)
# Assign a role for the user on parent and subproject
for proj in (parent, subproject):
self.put(self.build_role_assignment_link(
role_id=self.role_id, user_id=self.user_id,
project_id=proj['project']['id']))
# Make the API call
r = self.get('/projects/%(project_id)s?subtree_as_list' %
{'project_id': parent['project']['id']})
self.assertValidProjectResponse(r, parent['project'])
# Assert only 'subproject' is in the subtree
self.assertIn(subproject, r.result['project']['subtree'])
self.assertEqual(1, len(r.result['project']['subtree']))
def test_get_project_with_subtree_as_list_and_subtree_as_ids(self):
"""Attempt to get a project subtree as both a list and as IDs.
This uses ``GET /projects/{project_id}?subtree_as_list&subtree_as_ids``
which should fail with a bad request due to the conflicting query
strings.
"""
projects = self._create_projects_hierarchy(hierarchy_size=2)
self.get(
'/projects/%(project_id)s?subtree_as_list&subtree_as_ids' % {
'project_id': projects[1]['project']['id']},
expected_status=http_client.BAD_REQUEST)
def test_update_project(self):
"""Call ``PATCH /projects/{project_id}``."""
ref = unit.new_project_ref(domain_id=self.domain_id)
del ref['id']
r = self.patch(
'/projects/%(project_id)s' % {
'project_id': self.project_id},
body={'project': ref})
self.assertValidProjectResponse(r, ref)
def test_update_project_unsafe(self):
"""Call ``POST /projects/{project_id} with unsafe names``."""
unsafe_name = 'i am not / safe'
self.config_fixture.config(group='resource',
project_name_url_safe='off')
ref = unit.new_project_ref(name=unsafe_name,
domain_id=self.domain_id)
del ref['id']
self.patch(
'/projects/%(project_id)s' % {
'project_id': self.project_id},
body={'project': ref})
unsafe_name = 'i am still not / safe'
for config_setting in ['new', 'strict']:
self.config_fixture.config(group='resource',
project_name_url_safe=config_setting)
ref = unit.new_project_ref(name=unsafe_name,
domain_id=self.domain_id)
del ref['id']
self.patch(
'/projects/%(project_id)s' % {
'project_id': self.project_id},
body={'project': ref},
expected_status=http_client.BAD_REQUEST)
def test_update_project_unsafe_default(self):
"""Check default for unsafe names for ``POST /projects``."""
unsafe_name = 'i am not / safe'
# By default, we should be able to create unsafe names
ref = unit.new_project_ref(name=unsafe_name,
domain_id=self.domain_id)
del ref['id']
self.patch(
'/projects/%(project_id)s' % {
'project_id': self.project_id},
body={'project': ref})
def test_update_project_domain_id(self):
"""Call ``PATCH /projects/{project_id}`` with domain_id."""
project = unit.new_project_ref(domain_id=self.domain['id'])
self.resource_api.create_project(project['id'], project)
project['domain_id'] = CONF.identity.default_domain_id
r = self.patch('/projects/%(project_id)s' % {
'project_id': project['id']},
body={'project': project},
expected_status=exception.ValidationError.code)
self.config_fixture.config(domain_id_immutable=False)
project['domain_id'] = self.domain['id']
r = self.patch('/projects/%(project_id)s' % {
'project_id': project['id']},
body={'project': project})
self.assertValidProjectResponse(r, project)
def test_update_project_parent_id(self):
"""Call ``PATCH /projects/{project_id}``."""
projects = self._create_projects_hierarchy()
leaf_project = projects[1]['project']
leaf_project['parent_id'] = None
self.patch(
'/projects/%(project_id)s' % {
'project_id': leaf_project['id']},
body={'project': leaf_project},
expected_status=http_client.FORBIDDEN)
def test_update_project_is_domain_not_allowed(self):
"""Call ``PATCH /projects/{project_id}`` with is_domain.
The is_domain flag is immutable.
"""
project = unit.new_project_ref(domain_id=self.domain['id'])
resp = self.post('/projects',
body={'project': project})
self.assertFalse(resp.result['project']['is_domain'])
project['is_domain'] = True
self.patch('/projects/%(project_id)s' % {
'project_id': resp.result['project']['id']},
body={'project': project},
expected_status=http_client.BAD_REQUEST)
def test_disable_leaf_project(self):
"""Call ``PATCH /projects/{project_id}``."""
projects = self._create_projects_hierarchy()
leaf_project = projects[1]['project']
leaf_project['enabled'] = False
r = self.patch(
'/projects/%(project_id)s' % {
'project_id': leaf_project['id']},
body={'project': leaf_project})
self.assertEqual(
leaf_project['enabled'], r.result['project']['enabled'])
def test_disable_not_leaf_project(self):
"""Call ``PATCH /projects/{project_id}``."""
projects = self._create_projects_hierarchy()
root_project = projects[0]['project']
root_project['enabled'] = False
self.patch(
'/projects/%(project_id)s' % {
'project_id': root_project['id']},
body={'project': root_project},
expected_status=http_client.FORBIDDEN)
def test_delete_project(self):
"""Call ``DELETE /projects/{project_id}``
As well as making sure the delete succeeds, we ensure
that any credentials that reference this projects are
also deleted, while other credentials are unaffected.
"""
credential = unit.new_credential_ref(user_id=self.user['id'],
project_id=self.project_id)
self.credential_api.create_credential(credential['id'], credential)
# First check the credential for this project is present
r = self.credential_api.get_credential(credential['id'])
self.assertDictEqual(credential, r)
# Create a second credential with a different project
project2 = unit.new_project_ref(domain_id=self.domain['id'])
self.resource_api.create_project(project2['id'], project2)
credential2 = unit.new_credential_ref(user_id=self.user['id'],
project_id=project2['id'])
self.credential_api.create_credential(credential2['id'], credential2)
# Now delete the project
self.delete(
'/projects/%(project_id)s' % {
'project_id': self.project_id})
# Deleting the project should have deleted any credentials
# that reference this project
self.assertRaises(exception.CredentialNotFound,
self.credential_api.get_credential,
credential_id=credential['id'])
# But the credential for project2 is unaffected
r = self.credential_api.get_credential(credential2['id'])
self.assertDictEqual(credential2, r)
def test_delete_not_leaf_project(self):
"""Call ``DELETE /projects/{project_id}``."""
projects = self._create_projects_hierarchy()
self.delete(
'/projects/%(project_id)s' % {
'project_id': projects[0]['project']['id']},
expected_status=http_client.FORBIDDEN)
class ResourceV3toV2MethodsTestCase(unit.TestCase):
"""Test domain V3 to V2 conversion methods."""
def _setup_initial_projects(self):
self.project_id = uuid.uuid4().hex
self.domain_id = CONF.identity.default_domain_id
self.parent_id = uuid.uuid4().hex
# Project with only domain_id in ref
self.project1 = unit.new_project_ref(id=self.project_id,
name=self.project_id,
domain_id=self.domain_id)
# Project with both domain_id and parent_id in ref
self.project2 = unit.new_project_ref(id=self.project_id,
name=self.project_id,
domain_id=self.domain_id,
parent_id=self.parent_id)
# Project with no domain_id and parent_id in ref
self.project3 = unit.new_project_ref(id=self.project_id,
name=self.project_id,
domain_id=self.domain_id,
parent_id=self.parent_id)
# Expected result with no domain_id and parent_id
self.expected_project = {'id': self.project_id,
'name': self.project_id}
def test_v2controller_filter_domain_id(self):
# V2.0 is not domain aware, ensure domain_id is popped off the ref.
other_data = uuid.uuid4().hex
domain_id = CONF.identity.default_domain_id
ref = {'domain_id': domain_id,
'other_data': other_data}
ref_no_domain = {'other_data': other_data}
expected_ref = ref_no_domain.copy()
updated_ref = controller.V2Controller.filter_domain_id(ref)
self.assertIs(ref, updated_ref)
self.assertDictEqual(expected_ref, ref)
# Make sure we don't error/muck up data if domain_id isn't present
updated_ref = controller.V2Controller.filter_domain_id(ref_no_domain)
self.assertIs(ref_no_domain, updated_ref)
self.assertDictEqual(expected_ref, ref_no_domain)
def test_v3controller_filter_domain_id(self):
# No data should be filtered out in this case.
other_data = uuid.uuid4().hex
domain_id = uuid.uuid4().hex
ref = {'domain_id': domain_id,
'other_data': other_data}
expected_ref = ref.copy()
updated_ref = controller.V3Controller.filter_domain_id(ref)
self.assertIs(ref, updated_ref)
self.assertDictEqual(expected_ref, ref)
def test_v2controller_filter_domain(self):
other_data = uuid.uuid4().hex
domain_id = uuid.uuid4().hex
non_default_domain_ref = {'domain': {'id': domain_id},
'other_data': other_data}
default_domain_ref = {'domain': {'id': 'default'},
'other_data': other_data}
updated_ref = controller.V2Controller.filter_domain(default_domain_ref)
self.assertNotIn('domain', updated_ref)
self.assertNotIn(
'domain',
controller.V2Controller.filter_domain(non_default_domain_ref))
def test_v2controller_filter_project_parent_id(self):
# V2.0 is not project hierarchy aware, ensure parent_id is popped off.
other_data = uuid.uuid4().hex
parent_id = uuid.uuid4().hex
ref = {'parent_id': parent_id,
'other_data': other_data}
ref_no_parent = {'other_data': other_data}
expected_ref = ref_no_parent.copy()
updated_ref = controller.V2Controller.filter_project_parent_id(ref)
self.assertIs(ref, updated_ref)
self.assertDictEqual(expected_ref, ref)
# Make sure we don't error/muck up data if parent_id isn't present
updated_ref = controller.V2Controller.filter_project_parent_id(
ref_no_parent)
self.assertIs(ref_no_parent, updated_ref)
self.assertDictEqual(expected_ref, ref_no_parent)
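    # Illustrative sketch (not part of the original tests): chaining both V2
    # filters strips the V3-only keys from a project ref. Field values are
    # assumptions for the example only.
    def _example_v2_filtering(self):
        ref = {'id': uuid.uuid4().hex,
               'name': 'demo',
               'domain_id': CONF.identity.default_domain_id,
               'parent_id': uuid.uuid4().hex}
        ref = controller.V2Controller.filter_domain_id(ref)
        ref = controller.V2Controller.filter_project_parent_id(ref)
        return ref  # only 'id' and 'name' remain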
def test_v3_to_v2_project_method(self):
self._setup_initial_projects()
# TODO(shaleh): these optional fields are not handled well by the
# v3_to_v2 code. Manually remove them for now. Eventually update
# new_project_ref to not return optional values
del self.project1['enabled']
del self.project1['description']
del self.project2['enabled']
del self.project2['description']
del self.project3['enabled']
del self.project3['description']
updated_project1 = controller.V2Controller.v3_to_v2_project(
self.project1)
self.assertIs(self.project1, updated_project1)
self.assertDictEqual(self.expected_project, self.project1)
updated_project2 = controller.V2Controller.v3_to_v2_project(
self.project2)
self.assertIs(self.project2, updated_project2)
self.assertDictEqual(self.expected_project, self.project2)
updated_project3 = controller.V2Controller.v3_to_v2_project(
self.project3)
self.assertIs(self.project3, updated_project3)
        self.assertDictEqual(self.expected_project, self.project3)
def test_v3_to_v2_project_method_list(self):
self._setup_initial_projects()
project_list = [self.project1, self.project2, self.project3]
# TODO(shaleh): these optional fields are not handled well by the
# v3_to_v2 code. Manually remove them for now. Eventually update
# new_project_ref to not return optional values
for p in project_list:
del p['enabled']
del p['description']
updated_list = controller.V2Controller.v3_to_v2_project(project_list)
self.assertEqual(len(updated_list), len(project_list))
for i, ref in enumerate(updated_list):
# Order should not change.
self.assertIs(ref, project_list[i])
self.assertDictEqual(self.expected_project, self.project1)
self.assertDictEqual(self.expected_project, self.project2)
self.assertDictEqual(self.expected_project, self.project3)
| 40.442969 | 79 | 0.594259 |
7946c9b296a95e1feefc6cb19ee0841dc646637d | 4,374 | py | Python | src/wheel/vendored/packaging/_musllinux.py | hugovk/wheel | 0e07002ac82efa883061ac91c476d057b8d4d20d | [
"MIT"
] | null | null | null | src/wheel/vendored/packaging/_musllinux.py | hugovk/wheel | 0e07002ac82efa883061ac91c476d057b8d4d20d | [
"MIT"
] | null | null | null | src/wheel/vendored/packaging/_musllinux.py | hugovk/wheel | 0e07002ac82efa883061ac91c476d057b8d4d20d | [
"MIT"
] | null | null | null | """PEP 656 support.
This module implements logic to detect if the currently running Python is
linked against musl, and what musl version is used.
"""
from __future__ import annotations
import contextlib
import functools
import operator
import os
import re
import struct
import subprocess
import sys
from typing import IO, Iterator, NamedTuple
def _read_unpacked(f: IO[bytes], fmt: str) -> tuple[int, ...]:
return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
def _parse_ld_musl_from_elf(f: IO[bytes]) -> str | None:
"""Detect musl libc location by parsing the Python executable.
Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
"""
f.seek(0)
try:
ident = _read_unpacked(f, "16B")
except struct.error:
return None
if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF.
return None
f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version.
try:
# e_fmt: Format for program header.
# p_fmt: Format for section header.
# p_idx: Indexes to find p_type, p_offset, and p_filesz.
e_fmt, p_fmt, p_idx = {
1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit.
2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit.
}[ident[4]]
except KeyError:
return None
else:
p_get = operator.itemgetter(*p_idx)
# Find the interpreter section and return its content.
try:
_, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
except struct.error:
return None
for i in range(e_phnum + 1):
f.seek(e_phoff + e_phentsize * i)
try:
p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
except struct.error:
return None
if p_type != 3: # Not PT_INTERP.
continue
f.seek(p_offset)
interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
if "musl" not in interpreter:
return None
return interpreter
return None
class _MuslVersion(NamedTuple):
major: int
minor: int
def _parse_musl_version(output: str) -> _MuslVersion | None:
lines = [n for n in (n.strip() for n in output.splitlines()) if n]
if len(lines) < 2 or lines[0][:4] != "musl":
return None
m = re.match(r"Version (\d+)\.(\d+)", lines[1])
if not m:
return None
return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
@functools.lru_cache()
def _get_musl_version(executable: str) -> _MuslVersion | None:
"""Detect currently-running musl runtime version.
This is done by checking the specified executable's dynamic linking
information, and invoking the loader to parse its output for a version
string. If the loader is musl, the output would be something like::
musl libc (x86_64)
Version 1.2.2
Dynamic Program Loader
"""
with contextlib.ExitStack() as stack:
try:
f = stack.enter_context(open(executable, "rb"))
except OSError:
return None
ld = _parse_ld_musl_from_elf(f)
if not ld:
return None
proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True)
return _parse_musl_version(proc.stderr)
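# Illustrative sketch (added for exposition, not part of the vendored module):
# how the helpers above could be combined to describe an interpreter. The
# function name is an assumption introduced only for this example.
def _describe_musl(executable: str) -> str:
    version = _get_musl_version(executable)
    if version is None:
        return "not linked against musl"
    return f"musl {version.major}.{version.minor}"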
def platform_tags(arch: str) -> Iterator[str]:
"""Generate musllinux tags compatible to the current platform.
:param arch: Should be the part of platform tag after the ``linux_``
prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
prerequisite for the current platform to be musllinux-compatible.
:returns: An iterator of compatible musllinux tags.
"""
sys_musl = _get_musl_version(sys.executable)
if sys_musl is None: # Python not dynamically linked against musl.
return
for minor in range(sys_musl.minor, -1, -1):
yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
if __name__ == "__main__": # pragma: no cover
import sysconfig
plat = sysconfig.get_platform()
assert plat.startswith("linux-"), "not linux"
print("plat:", plat)
print("musl:", _get_musl_version(sys.executable))
print("tags:", end=" ")
for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
print(t, end="\n ")
| 31.467626 | 80 | 0.638775 |
7946ca15306944b2acf91f6d480518c20dd6059e | 1,302 | py | Python | assignment/assignment2/python/preprocess.py | maple1eaf/data_mining_inf553 | fba0c19f46aac5882e103dbe53155e7128a9290f | [
"MIT"
] | 1 | 2021-05-04T05:17:57.000Z | 2021-05-04T05:17:57.000Z | assignment/assignment2/python/preprocess.py | maple1eaf/data_mining_inf553 | fba0c19f46aac5882e103dbe53155e7128a9290f | [
"MIT"
] | null | null | null | assignment/assignment2/python/preprocess.py | maple1eaf/data_mining_inf553 | fba0c19f46aac5882e103dbe53155e7128a9290f | [
"MIT"
] | 1 | 2021-09-24T08:17:23.000Z | 2021-09-24T08:17:23.000Z | """
spark-submit preprocess.py <business_file_path> <review_file_path> <output_file_path>
spark-submit preprocess.py "file:///Users/markduan/duan/USC_course/USC_APDS/INF553/homework/hw2/dataset/business.json" "file:///Users/markduan/duan/USC_course/USC_APDS/INF553/homework/hw2/dataset/review.json" "/Users/markduan/duan/USC_course/USC_APDS/INF553/homework/hw2/dataset/task2_data.csv"
"""
import csv
import json
import sys
import time
from pyspark import SparkConf, SparkContext
business_file = sys.argv[1]
review_file = sys.argv[2]
output_file = sys.argv[3]
time0 = time.time()
# define spark env
conf = SparkConf() \
.setAppName("task1") \
.setMaster("local[*]")
sc = SparkContext(conf=conf)
business_data = sc.textFile(business_file) \
.map(json.loads) \
.map(lambda x: (x["business_id"], x["state"])) \
.filter(lambda x: x[1] == "NV")
review_data = sc.textFile(review_file) \
.map(json.loads) \
.map(lambda x: (x["business_id"], x["user_id"]))
user_business = review_data.join(business_data) \
.map(lambda x: (x[1][0], x[0])) \
.collect()
header = ["user_id", "business_id"]
with open(output_file, 'w') as fp:
wr = csv.writer(fp)
wr.writerow(header)
wr.writerows(user_business)
time1 = time.time()
print("consume %fs." % (time1-time0))
| 28.304348 | 294 | 0.698925 |
7946ca368e2daf9fbe8215ecd41671c24f448289 | 22,327 | py | Python | utils/raw_data_utils.py | cogito233/text-autoaugment | cae3cfddaba9da01cf291f975e5cf4f734634b51 | [
"MIT"
] | 1 | 2021-09-08T12:00:11.000Z | 2021-09-08T12:00:11.000Z | utils/raw_data_utils.py | cogito233/text-autoaugment | cae3cfddaba9da01cf291f975e5cf4f734634b51 | [
"MIT"
] | null | null | null | utils/raw_data_utils.py | cogito233/text-autoaugment | cae3cfddaba9da01cf291f975e5cf4f734634b51 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn.model_selection import StratifiedShuffleSplit, KFold
import torchtext
import csv
import os
import numpy as np
import random
import pandas as pd
import re
from theconf import Config as C
from common import get_logger
import logging
logger = get_logger('Text AutoAugment')
logger.setLevel(logging.INFO)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
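# Example (added for exposition, not part of the original module): a minimal
# single-sentence instance as the processors below build them. Field values
# are assumptions used only for illustration.
_EXAMPLE_INPUT = InputExample(
    guid="train-0",
    text_a="a gripping, well-acted thriller",
    text_b=None,
    label=1)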
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, raw_data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, raw_data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
def get_train_size(self):
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None, delimiter="\t"):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding='utf-8') as f:
reader = csv.reader(f, delimiter=delimiter, quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
def clean_web_text(st):
"""clean text."""
st = st.replace("<br />", " ")
st = st.replace(""", "\"")
st = st.replace("<p>", " ")
if "<a href=" in st:
# print("before:\n", st)
while "<a href=" in st:
start_pos = st.find("<a href=")
end_pos = st.find(">", start_pos)
if end_pos != -1:
st = st[:start_pos] + st[end_pos + 1:]
else:
print("incomplete href")
print("before", st)
st = st[:start_pos] + st[start_pos + len("<a href=")]
print("after", st)
st = st.replace("</a>", "")
# print("after\n", st)
# print("")
st = st.replace("\\n", " ")
st = st.replace("\\", " ")
# while " " in st:
# st = st.replace(" ", " ")
return st
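# Illustration (added for exposition, not part of the original module): what
# clean_web_text does to a typical scraped fragment. The sample string is an
# assumption used only for this example.
def _clean_web_text_example():
    raw = 'Great movie!<br /><a href="http://example.com">link</a>&quot;fun&quot;'
    return clean_web_text(raw)  # -> 'Great movie! link"fun"'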
def subsample_by_classes(all_examples, labels, tag):
if C.get()['ir'] == 1:
return all_examples
num_per_class = {label: sum([e.label == label for e in all_examples]) for label in labels}
logger.info("{}_num_per_class before: {}".format(tag, num_per_class))
num_per_class[labels[0]] = round(num_per_class[labels[0]] * C.get()['ir'])
logger.info("{}_num_per_class after: {}".format(tag, num_per_class))
examples = {label: [] for label in labels}
for example in all_examples:
examples[example.label].append(example)
selected_examples = []
for label in labels:
random.seed(C.get()['seed'])
random.shuffle(examples[label])
num_in_class = num_per_class[label]
selected_examples = selected_examples + examples[label][:num_in_class]
return selected_examples
class IMDbProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, raw_data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(raw_data_dir, "train.csv"), quotechar='"'), "train")
def get_test_examples(self, raw_data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(raw_data_dir, "test.csv"), quotechar='"'), "test")
def get_unsup_examples(self, raw_data_dir, unsup_set):
"""See base class."""
if unsup_set == "unsup_ext":
return self._create_examples(self._read_tsv(os.path.join(raw_data_dir, "unsup_ext.csv"), quotechar='"'),
"unsup_ext", skip_unsup=False)
elif unsup_set == "unsup_in":
return self._create_examples(self._read_tsv(os.path.join(raw_data_dir, "train.csv"), quotechar='"'),
"unsup_in", skip_unsup=False)
def get_labels(self):
"""See base class."""
return ["pos", "neg"]
def _create_examples(self, lines, set_type, skip_unsup=True):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
if skip_unsup and line[1] == "unsup":
continue
if line[1] == "unsup" and len(line[0]) < 500:
# tf.logging.info("skipping short samples:{:s}".format(line[0]))
continue
guid = "%s-%s" % (set_type, line[2])
text_a = line[0]
label = line[1]
text_a = clean_web_text(text_a)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def get_train_size(self):
return 25000
def get_test_size(self):
return 25000
def split(self, examples, test_size, train_size, n_splits=2, split_idx=0):
label_map = {"pos": 0, "neg": 1}
labels = [label_map[e.label] for e in examples]
kf = StratifiedShuffleSplit(n_splits=n_splits, test_size=test_size, train_size=train_size,
random_state=C.get()['seed'])
kf = kf.split(list(range(len(examples))), labels)
for _ in range(split_idx + 1): # split_idx equal to cv_fold. this loop is used to get i-th fold
train_idx, valid_idx = next(kf)
train_dev_set = np.array(examples)
return list(train_dev_set[train_idx]), list(train_dev_set[valid_idx])
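# Illustrative sketch (added for exposition, not part of the original module):
# the typical way a processor and its split() helper are combined. The path and
# sizes here are assumptions for the example only.
def _example_imdb_split(raw_data_dir="data/aclImdb", n_train=20, n_valid=1000):
    processor = IMDbProcessor()
    examples = processor.get_train_examples(raw_data_dir)
    # stratified split keyed on the pos/neg labels, seeded from the config
    return processor.split(examples, test_size=n_valid, train_size=n_train)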
class STSProcessor(DataProcessor):
def get_train_examples(self, raw_data_dir):
return self._create_examples(
pd.read_csv(os.path.join(raw_data_dir, 'sts-train-dev.csv'), header=None, sep='\t', quoting=csv.QUOTE_NONE,
encoding='utf-8', usecols=[3, 4, 5, 6]), "train")
def get_test_examples(self, raw_data_dir):
return self._create_examples(
pd.read_csv(os.path.join(raw_data_dir, 'sts-test.csv'), header=None, sep='\t', quoting=csv.QUOTE_NONE,
encoding='utf-8', usecols=[3, 4, 5, 6]), "test")
def _create_examples(self, lines, set_type, skip_unsup=True):
examples = []
for (i, line) in lines.iterrows():
guid = "%s-%s" % (set_type, line[3])
text_a = line[5]
text_b = line[6]
label = line[4]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
return [] # from 0.0 to 5.0
def get_train_size(self):
return 7249
def get_test_size(self):
return 1379
def split(self, examples, n_splits=None, split=None, split_idx=None): # TODO control size
kf = KFold(n_splits=n_splits, random_state=C.get()['seed'])
kf = kf.split(list(range(len(examples))))
for _ in range(split_idx + 1): # split_idx equal to cv_fold. this loop is used to get i-th fold
train_idx, valid_idx = next(kf)
train_dev_set = np.array(examples)
return list(train_dev_set[train_idx]), list(train_dev_set[valid_idx])
class MRPCProcessor(DataProcessor):
def get_train_examples(self, raw_data_dir):
return self._create_examples(self._read_tsv(os.path.join(raw_data_dir, 'msr_paraphrase_train.txt')), "train")
def get_test_examples(self, raw_data_dir):
return self._create_examples(self._read_tsv(os.path.join(raw_data_dir, 'msr_paraphrase_test.txt')), "test")
def get_labels(self):
return [0, 1]
def _create_examples(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = int(line[0])
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_train_size(self):
return 4076 # label_0:1323; label_1:2753
def get_test_size(self):
return 1725 # label_0:578; label_1:1147
def split(self, examples, test_size, train_size, n_splits=2, split_idx=0):
labels = [e.label for e in examples]
kf = StratifiedShuffleSplit(n_splits=n_splits, test_size=test_size, train_size=train_size,
random_state=C.get()['seed'])
kf = kf.split(list(range(len(examples))), labels)
for _ in range(split_idx + 1): # split_idx equal to cv_fold. this loop is used to get i-th fold
train_idx, valid_idx = next(kf)
train_dev_set = np.array(examples)
return list(train_dev_set[train_idx]), list(train_dev_set[valid_idx])
class SST2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_train_examples(self, raw_data_dir):
train_e = self._create_examples(os.path.join(raw_data_dir, 'stsa.binary.train'), "train")
dev_e = self._create_examples(os.path.join(raw_data_dir, 'stsa.binary.dev'), "dev")
train_e.extend(dev_e)
return train_e
def get_test_examples(self, raw_data_dir):
return self._create_examples(os.path.join(raw_data_dir, 'stsa.binary.test'), "test")
def get_labels(self):
"""See base class."""
return [0, 1]
def _create_examples(self, dataroot, set_type):
examples = []
with open(dataroot, 'r', encoding='utf-8') as f:
for i, line in enumerate(f):
guid = "%s-%s" % (set_type, i)
parts = line.strip().split()
label = int(parts[0])
text_a = ' '.join(parts[1:])
text_a = self.clean_sst_text(text_a)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def clean_sst_text(self, text):
"""Cleans tokens in the SST data, which has already been tokenized.
"""
text = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", text)
text = re.sub(r"\s{2,}", " ", text)
return text.strip().lower()
def get_train_size(self):
return 7791 # 6919+872
def get_test_size(self):
return 1821
def split(self, examples, test_size, train_size, n_splits=2, split_idx=0):
labels = [e.label for e in examples]
kf = StratifiedShuffleSplit(n_splits=n_splits, test_size=test_size, train_size=train_size,
random_state=C.get()['seed'])
kf = kf.split(list(range(len(examples))), labels)
for _ in range(split_idx + 1): # split_idx equal to cv_fold. this loop is used to get i-th fold
train_idx, valid_idx = next(kf)
train_dev_set = np.array(examples)
return list(train_dev_set[train_idx]), list(train_dev_set[valid_idx])
class SST5Processor(DataProcessor):
def __init__(self):
self.TEXT = torchtext.data.Field()
self.LABEL = torchtext.data.Field(sequential=False)
def get_train_examples(self, raw_data_dir):
train_e = torchtext.datasets.SST(os.path.join(raw_data_dir, 'train.txt'), self.TEXT, self.LABEL,
fine_grained=True).examples
dev_e = torchtext.datasets.SST(os.path.join(raw_data_dir, 'dev.txt'), self.TEXT, self.LABEL,
fine_grained=True).examples
train_e.extend(dev_e)
return self._create_examples(train_e, "train")
def get_test_examples(self, raw_data_dir):
test_e = torchtext.datasets.SST(os.path.join(raw_data_dir, 'test.txt'), self.TEXT, self.LABEL,
fine_grained=True).examples
return self._create_examples(test_e, "test")
def get_labels(self):
return ['negative', 'very positive', 'neutral', 'positive', 'very negative']
def _create_examples(self, lines, set_type, skip_unsup=True):
examples = []
for i, line in enumerate(lines):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(guid=guid, text_a=' '.join(line.text), text_b=None, label=line.label))
return examples
def get_train_size(self):
return 9643 # 8542+1101
def get_test_size(self):
return 2210
def split(self, examples, test_size, train_size, n_splits=2, split_idx=0):
label_map = {"negative": 0, "very positive": 1, 'neutral': 2, 'positive': 3, 'very negative': 4}
labels = [label_map[e.label] for e in examples]
kf = StratifiedShuffleSplit(n_splits=n_splits, test_size=test_size, train_size=train_size,
random_state=C.get()['seed'])
kf = kf.split(list(range(len(examples))), labels)
for _ in range(split_idx + 1): # split_idx equal to cv_fold. this loop is used to get i-th fold
train_idx, valid_idx = next(kf)
train_dev_set = np.array(examples)
return list(train_dev_set[train_idx]), list(train_dev_set[valid_idx])
class TRECProcessor(DataProcessor):
def __init__(self):
self.TEXT = torchtext.data.Field()
self.LABEL = torchtext.data.Field(sequential=False)
def get_train_examples(self, raw_data_dir):
train_e = torchtext.datasets.TREC(os.path.join(raw_data_dir, 'train_5500.label'), self.TEXT, self.LABEL,
fine_grained=False).examples
return self._create_examples(train_e, "train")
def get_test_examples(self, raw_data_dir):
test_e = torchtext.datasets.TREC(os.path.join(raw_data_dir, 'TREC_10.label'), self.TEXT, self.LABEL,
fine_grained=False).examples
return self._create_examples(test_e, "test")
def get_labels(self):
return ['ENTY', 'DESC', 'LOC', 'ABBR', 'NUM', 'HUM']
def _create_examples(self, lines, set_type, skip_unsup=True):
examples = []
for i, line in enumerate(lines):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(guid=guid, text_a=' '.join(line.text), text_b=None, label=line.label))
return examples
def get_train_size(self):
return 5452
def get_test_size(self):
return 500
def split(self, examples, test_size, train_size, n_splits=2, split_idx=0):
label_map = {"ENTY": 0, "DESC": 1, 'LOC': 2, 'ABBR': 3, 'NUM': 4, 'HUM': 5}
labels = [label_map[e.label] for e in examples]
kf = StratifiedShuffleSplit(n_splits=n_splits, test_size=test_size, train_size=train_size,
random_state=C.get()['seed'])
kf = kf.split(list(range(len(examples))), labels)
for _ in range(split_idx + 1): # split_idx equal to cv_fold. this loop is used to get i-th fold
train_idx, valid_idx = next(kf)
train_dev_set = np.array(examples)
return list(train_dev_set[train_idx]), list(train_dev_set[valid_idx])
class TextClassProcessor(DataProcessor):
def get_train_examples(self, raw_data_dir):
"""See base class."""
examples = self._create_examples(
self._read_tsv(os.path.join(raw_data_dir, "train.csv"), quotechar="\"", delimiter=","), "train")
assert len(examples) == self.get_train_size()
return examples
def get_test_examples(self, raw_data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(raw_data_dir, "test.csv"), quotechar="\"", delimiter=","), "test")
def get_unsup_examples(self, raw_data_dir, unsup_set):
"""See base class."""
if unsup_set == "unsup_in":
return self._create_examples(
self._read_tsv(os.path.join(raw_data_dir, "train.csv"), quotechar="\"", delimiter=","), "unsup_in",
skip_unsup=False)
else:
return self._create_examples(
self._read_tsv(os.path.join(raw_data_dir, "{:s}.csv".format(unsup_set)), quotechar="\"", delimiter=","),
unsup_set, skip_unsup=False)
def _create_examples(self, lines, set_type, skip_unsup=True, only_unsup=False):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if skip_unsup and line[0] == "unsup":
continue
if only_unsup and line[0] != "unsup":
continue
guid = "%s-%d" % (set_type, i)
if self.has_title:
text_a = line[2]
text_b = line[1]
else:
text_a = line[1]
text_b = None
label = int(line[0]) - 1 # TODO right for all datasets??
text_a = clean_web_text(text_a)
if text_b is not None:
text_b = clean_web_text(text_b)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def split(self, examples, test_size, train_size, n_splits=2, split_idx=0):
labels = [e.label for e in examples]
kf = StratifiedShuffleSplit(n_splits=n_splits, test_size=test_size, train_size=train_size,
random_state=C.get()['seed'])
kf = kf.split(list(range(len(examples))), labels)
for _ in range(split_idx + 1): # split_idx equal to cv_fold. this loop is used to get i-th fold
train_idx, valid_idx = next(kf)
train_dev_set = np.array(examples)
return list(train_dev_set[train_idx]), list(train_dev_set[valid_idx])
class YELP2Processor(TextClassProcessor):
def __init__(self):
self.has_title = False
def get_labels(self):
"""See base class."""
return [0, 1]
def get_train_size(self):
return 560000
def get_dev_size(self):
return 38000
class YELP5Processor(TextClassProcessor):
def __init__(self):
self.has_title = False
def get_labels(self):
"""See base class."""
return [i for i in range(0, 5)]
def get_train_size(self):
return 650000
def get_dev_size(self):
return 50000
class AMAZON2Processor(TextClassProcessor):
def __init__(self):
self.has_title = True
def get_labels(self):
"""See base class."""
return [0, 1]
def get_train_size(self):
return 3600000
def get_dev_size(self):
return 400000
def get_unsup_examples(self, raw_data_dir, unsup_set):
"""See base class."""
if unsup_set == "unsup_in":
return self._create_examples(
self._read_tsv(
os.path.join(raw_data_dir, "train.csv"),
quotechar="\"",
delimiter=","),
"unsup_in", skip_unsup=False)
else:
dir_cell = raw_data_dir[5:7]
unsup_dir = None # update this path if you use unsupervised data
return self._create_examples(
self._read_tsv(
os.path.join(unsup_dir, "{:s}.csv".format(unsup_set)),
quotechar="\"",
delimiter=","),
unsup_set, skip_unsup=False)
class AMAZON5Processor(TextClassProcessor):
def __init__(self):
self.has_title = True
def get_labels(self):
"""See base class."""
return [str(i) for i in range(1, 6)] # TODO why range(0,5)?
def get_unsup_examples(self, raw_data_dir, unsup_set):
"""See base class."""
if unsup_set == "unsup_in":
return self._create_examples(
self._read_tsv(os.path.join(raw_data_dir, "train.csv"), quotechar="\"", delimiter=","), "unsup_in",
skip_unsup=False)
else:
dir_cell = raw_data_dir[5:7]
unsup_dir = None # update this path if you use unsupervised data
return self._create_examples(
self._read_tsv(os.path.join(unsup_dir, "{:s}.csv".format(unsup_set)), quotechar="\"", delimiter=","),
unsup_set, skip_unsup=False)
def get_train_size(self):
return 3000000
def get_dev_size(self):
return 650000
class DBPediaProcessor(TextClassProcessor):
def __init__(self):
self.has_title = True
def get_labels(self):
"""See base class."""
return [str(i) for i in range(1, 15)]
def get_train_size(self):
return 560000
def get_dev_size(self):
return 70000
def get_processor(task_name):
"""get processor."""
task_name = task_name.lower()
processors = {
"imdb": IMDbProcessor,
"dbpedia": DBPediaProcessor,
"yelp2": YELP2Processor,
"yelp5": YELP5Processor,
"amazon2": AMAZON2Processor,
"amazon5": AMAZON5Processor,
'sts': STSProcessor,
'mrpc': MRPCProcessor,
'sst2': SST2Processor,
'sst5': SST5Processor,
'trec': TRECProcessor
}
processor = processors[task_name]()
return processor
if __name__ == '__main__':
pc = get_processor('yelp5')
pc.get_train_examples('/home/andyren/fast-autoaugment/FastAutoAugment/data/yelp_review_full_csv')
| 37.650927 | 120 | 0.604783 |
7946caf07a3f1ad50ee2264463b15dd64ab1ed02 | 167 | py | Python | Py_lab/Lab 1,2/matrix.py | veterinarian-5300/Genious-Python-Code-Generator | d78cd5f4b64221e8e4dc80d6e1f5ba0a4c613bcd | [
"Apache-2.0"
] | 1 | 2021-09-27T06:24:21.000Z | 2021-09-27T06:24:21.000Z | Py_lab/Lab 1,2/matrix.py | veterinarian-5300/Genious-Python-Code-Generator | d78cd5f4b64221e8e4dc80d6e1f5ba0a4c613bcd | [
"Apache-2.0"
] | null | null | null | Py_lab/Lab 1,2/matrix.py | veterinarian-5300/Genious-Python-Code-Generator | d78cd5f4b64221e8e4dc80d6e1f5ba0a4c613bcd | [
"Apache-2.0"
] | null | null | null | # importing numpy
import numpy as np
matrix = np.array([[2,4,6,10],
[5,10,15,20],
[7,14,21,28]])
print(matrix.shape)
print(matrix.size)
print(matrix.ndim)
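# Expected output for the 3x4 matrix above (added for illustration):
#   (3, 4)
#   12
#   2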
| 15.181818 | 31 | 0.640719 |
7946cb641be9226b28a29f244355bb4c48eb2304 | 3,121 | py | Python | app/app/settings.py | BINAYKUMAR943/recipe-app-api | ce70ba8df09ac4df76250a26a9a2950f8044abf2 | [
"MIT"
] | null | null | null | app/app/settings.py | BINAYKUMAR943/recipe-app-api | ce70ba8df09ac4df76250a26a9a2950f8044abf2 | [
"MIT"
] | null | null | null | app/app/settings.py | BINAYKUMAR943/recipe-app-api | ce70ba8df09ac4df76250a26a9a2950f8044abf2 | [
"MIT"
] | null | null | null | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=vp-0z7j-u#kw5n+!!z2q843*@tpw8rpi_5=rn02co5kz0*u_q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
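# Note (added for exposition, not part of the original settings): with the
# custom user model configured above, other apps should resolve it indirectly,
# e.g.
#   from django.contrib.auth import get_user_model
#   User = get_user_model()  # -> the core.User model
# rather than importing the model class directly.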
| 24.968 | 91 | 0.69497 |
7946cbcfaf3c637be4fdc13d7b4bb36ec9ff0f8d | 5,285 | py | Python | travels/views.py | adrianboratyn/TripRecommendations | d3e5a10d80c405d5ac22f028be54c8198bc10410 | [
"MIT"
] | null | null | null | travels/views.py | adrianboratyn/TripRecommendations | d3e5a10d80c405d5ac22f028be54c8198bc10410 | [
"MIT"
] | null | null | null | travels/views.py | adrianboratyn/TripRecommendations | d3e5a10d80c405d5ac22f028be54c8198bc10410 | [
"MIT"
] | 2 | 2021-06-26T13:03:22.000Z | 2021-06-27T10:47:59.000Z | from django.contrib import messages
from .models import Trip, TripPicture, TripDates, Polling, TripReservation
from django.views.generic.edit import CreateView
from .forms import TripReservationForm, PollingForm
from django.views.generic.list import ListView
from django.views.generic import DetailView
from django.views.generic.edit import FormMixin
from django.urls import reverse_lazy
from django.urls import reverse
from django.contrib.auth.mixins import LoginRequiredMixin
import travels.content_based_recommendation as cbr
from travels.helper import Helper
import travels.memory_based_collaborative_filtering as mbcf
class SelectedForYouListView(ListView):
model = Trip
template_name = 'travels/trip_list.html'
paginate_by = 12
context_object_name = "trips"
ankieta = None
def get_context_data(self, **kwargs):
context = super(SelectedForYouListView, self).get_context_data(**kwargs)
context['pictures'] = TripPicture.objects.all().filter(default=True, trip__in=self.get_queryset()[:50])
if self.request.user.is_authenticated:
context['polling'] = self.ankieta
return context
def get_queryset(self):
if self.request.user.is_authenticated:
user_reservations = TripReservation.manager_objects.user_reservations(self.request.user)
result_set = set()
for reservation in user_reservations:
try:
result_set.update(cbr.get_result(reservation.trip.title))
except IndexError:
print("Brak wycieczki w V2csv.csv")
except Exception:
print("Error")
self.ankieta = Polling.objects.filter(user=self.request.user).last()
if self.ankieta is not None:
polling_query = Helper.get_polling_query(self.ankieta)
for trip in polling_query:
if not result_set.__contains__(trip.title):
result_set.add(trip.title)
query = Trip.objects.filter(title__in=result_set).order_by('-rating')
return query
return Trip.objects.none()
class PopularListView(ListView):
model = Trip
paginate_by = 12
context_object_name = "trips"
template_name = "travels/trip_list.html"
def get_context_data(self, **kwargs):
context = super(PopularListView, self).get_context_data(**kwargs)
context['pictures'] = TripPicture.objects.all().filter(default=True, trip__in=self.get_queryset()[:50])
if self.request.user.is_authenticated:
context['polling'] = Polling.objects.filter(user=self.request.user).last()
return context
def get_queryset(self):
return Helper.popular
class OthersChooseListView(ListView):
model = Trip
paginate_by = 12
context_object_name = "trips"
def get_context_data(self, **kwargs):
context = super(OthersChooseListView, self).get_context_data(**kwargs)
context['pictures'] = TripPicture.objects.all().filter(default=True, trip__in=self.get_queryset()[:50])
if self.request.user.is_authenticated:
context['polling'] = Polling.objects.filter(user=self.request.user).last()
return context
def get_queryset(self):
if self.request.user.is_authenticated:
return mbcf.get_result(self.request.user)
return Trip.objects.none()
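# Sketch (added for exposition, not part of the original views): the pattern
# shared by the three list views above -- resolve a personalised queryset, then
# attach default pictures and the user's latest polling to the context. The
# function name is an assumption introduced only for this example.
def _recommendation_context(request, trips):
    context = {
        'trips': trips,
        'pictures': TripPicture.objects.filter(default=True, trip__in=trips[:50]),
    }
    if request.user.is_authenticated:
        context['polling'] = Polling.objects.filter(user=request.user).last()
    return context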
class TripDetailView(FormMixin, DetailView):
model = Trip
context_object_name = "trip"
form_class = TripReservationForm
def get_context_data(self, **kwargs):
context = super(TripDetailView, self).get_context_data(**kwargs)
context['pictures'] = TripPicture.objects.all().filter(trip=context['trip'])
context['dates'] = TripDates.objects.filter(trip=context['trip'])
        TripReservationForm.select_dates = self.get_data(context['dates'])
context['form'] = self.form_class(initial={'user': self.request.user, 'trip': self.object})
return context
def get_success_url(self):
return reverse("travels:details", kwargs={"slug": self.object.slug})
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form()
if form.is_valid():
messages.success(request, f'Rezerwacja została złożona')
return self.form_valid(form)
else:
print(form.cleaned_data)
return self.form_invalid(form)
def form_valid(self, form):
form.save(False)
print(form.cleaned_data)
form.save()
return super(TripDetailView, self).form_valid(form)
def get_data(self, data):
choices = list(data)
select_dates = []
for value in choices:
select_dates.append((value.pk, value.start_date))
return select_dates
class QuestionnaireView(LoginRequiredMixin, CreateView):
model = Polling
form_class = PollingForm
success_url = reverse_lazy('travels:selected_for_you')
def get_initial(self):
return {'user': self.request.user}
def form_valid(self, form):
result = super().form_valid(form)
messages.success(self.request, f'Dziękujemy za wypełnienie ankiety')
return result
| 36.958042 | 111 | 0.674551 |
7946cc394fc1966e217460a5675b0c3d76ec4447 | 2,483 | py | Python | lib/flowbber/args.py | simiolabs/flowbber | f306d9ade30a807d0b5c61620bbb528830e4bd8f | [
"Apache-2.0"
] | null | null | null | lib/flowbber/args.py | simiolabs/flowbber | f306d9ade30a807d0b5c61620bbb528830e4bd8f | [
"Apache-2.0"
] | 1 | 2019-02-21T19:26:34.000Z | 2019-02-21T19:26:34.000Z | lib/flowbber/args.py | simiolabs/flowbber | f306d9ade30a807d0b5c61620bbb528830e4bd8f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2017 KuraLabs S.R.L
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Argument management module.
"""
from pathlib import Path
from . import __version__
from .logging import get_logger, setup_logging
log = get_logger(__name__)
def validate_args(args):
"""
Validate that arguments are valid.
:param args: An arguments namespace.
:type args: :py:class:`argparse.Namespace`
:return: The validated namespace.
:rtype: :py:class:`argparse.Namespace`
"""
setup_logging(args.verbose)
log.debug('Raw arguments:\n{}'.format(args))
# Check if pipeline file exists
args.pipeline = Path(args.pipeline)
if not args.pipeline.is_file():
log.error('No such file {}'.format(args.pipeline))
exit(1)
args.pipeline = args.pipeline.resolve()
return args
def parse_args(argv=None):
"""
Argument parsing routine.
:param argv: A list of argument strings.
:type argv: list
:return: A parsed and verified arguments namespace.
:rtype: :py:class:`argparse.Namespace`
"""
from argparse import ArgumentParser
parser = ArgumentParser(
description=(
'Flowbber is a generic tool and framework that allows to execute '
'custom pipelines for data gathering, publishing and analysis.'
)
)
parser.add_argument(
'-v', '--verbose',
help='Increase verbosity level',
default=0,
action='count'
)
parser.add_argument(
'--version',
action='version',
version='Flowbber v{}'.format(__version__)
)
parser.add_argument(
'-d', '--dry-run',
help='Dry run the pipeline',
default=False,
action='store_true'
)
parser.add_argument(
'pipeline',
help='Pipeline definition file'
)
args = parser.parse_args(argv)
args = validate_args(args)
return args
__all__ = ['parse_args']
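# Illustrative usage sketch (assumes the package layout above; the pipeline file name is
# made up, not taken from this module):
#
#     from flowbber.args import parse_args
#     args = parse_args(['-vv', 'pipeline.toml'])
#     print(args.pipeline, args.dry_run, args.verbose)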
| 24.106796 | 78 | 0.653645 |
7946ccf65fb3c95db7e70c25021c9e6569e68a68 | 3,320 | py | Python | benchmarks/benchmarks/bench_ufunc.py | leifdenby/numpy | 4750c2810c5e0943cbea8e2acc0337c4e66a9bb2 | ["BSD-3-Clause"] | 1 | 2021-01-06T21:28:45.000Z | 2021-01-06T21:28:45.000Z | benchmarks/benchmarks/bench_ufunc.py | leifdenby/numpy | 4750c2810c5e0943cbea8e2acc0337c4e66a9bb2 | ["BSD-3-Clause"] | null | null | null | benchmarks/benchmarks/bench_ufunc.py | leifdenby/numpy | 4750c2810c5e0943cbea8e2acc0337c4e66a9bb2 | ["BSD-3-Clause"] | null | null | null |
from __future__ import absolute_import, division, print_function
from .common import Benchmark, squares_
import numpy as np
ufuncs = ['abs', 'absolute', 'add', 'arccos', 'arccosh', 'arcsin',
'arcsinh', 'arctan', 'arctan2', 'arctanh', 'bitwise_and',
'bitwise_not', 'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil',
'conj', 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad',
'degrees', 'divide', 'equal', 'exp', 'exp2', 'expm1',
'fabs', 'floor', 'floor_divide', 'fmax', 'fmin', 'fmod',
'frexp', 'greater', 'greater_equal', 'hypot', 'invert',
'isfinite', 'isinf', 'isnan', 'ldexp', 'left_shift', 'less',
'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp',
'logaddexp2', 'logical_and', 'logical_not', 'logical_or',
'logical_xor', 'maximum', 'minimum', 'mod', 'modf',
'multiply', 'negative', 'nextafter', 'not_equal', 'power',
'rad2deg', 'radians', 'reciprocal', 'remainder',
'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh',
'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh',
'true_divide', 'trunc']
for name in dir(np):
if isinstance(getattr(np, name, None), np.ufunc) and name not in ufuncs:
print("Missing ufunc %r" % (name,))
class Broadcast(Benchmark):
def setup(self):
self.d = np.ones((50000, 100), dtype=np.float64)
self.e = np.ones((100,), dtype=np.float64)
def time_broadcast(self):
self.d - self.e
class UFunc(Benchmark):
params = [ufuncs]
param_names = ['ufunc']
timeout = 2
def setup(self, ufuncname):
np.seterr(all='ignore')
try:
self.f = getattr(np, ufuncname)
except AttributeError:
raise NotImplementedError()
self.args = []
for t, a in squares_.items():
arg = (a,) * self.f.nin
try:
self.f(*arg)
except TypeError:
continue
self.args.append(arg)
def time_ufunc_types(self, ufuncname):
[self.f(*arg) for arg in self.args]
class Custom(Benchmark):
def setup(self):
self.b = np.ones(20000, dtype=np.bool)
def time_nonzero(self):
np.nonzero(self.b)
def time_count_nonzero(self):
np.count_nonzero(self.b)
def time_not_bool(self):
(~self.b)
def time_and_bool(self):
(self.b & self.b)
def time_or_bool(self):
(self.b | self.b)
class CustomScalar(Benchmark):
params = [np.float32, np.float64]
param_names = ['dtype']
def setup(self, dtype):
self.d = np.ones(20000, dtype=dtype)
def time_add_scalar2(self, dtype):
np.add(self.d, 1)
def time_divide_scalar2(self, dtype):
np.divide(self.d, 1)
def time_divide_scalar2_inplace(self, dtype):
np.divide(self.d, 1, out=self.d)
def time_less_than_scalar2(self, dtype):
(self.d < 1)
class Scalar(Benchmark):
def setup(self):
self.x = np.asarray(1.0)
self.y = np.asarray((1.0 + 1j))
self.z = complex(1.0, 1.0)
def time_add_scalar(self):
(self.x + self.x)
def time_add_scalar_conv(self):
(self.x + 1.0)
def time_add_scalar_conv_complex(self):
(self.y + self.z)
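# Note: these classes follow the airspeed velocity (asv) conventions used by numpy's
# benchmark suite: asv calls setup() before timing and times every method whose name
# starts with "time_"; for parameterised classes (params/param_names) each parameter
# value - here each ufunc or dtype - is benchmarked separately.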
| 28.376068 | 76 | 0.569578 |
7946cd4ad6664caeba3a1f92dd7e55d2cdc87a72 | 12,167 | py | Python | salt/cli/caller.py | JesseRhoads/salt | bd5395ea85956e064970710aae03398cbd1b20f5 | ["Apache-2.0"] | 1 | 2020-10-02T02:29:25.000Z | 2020-10-02T02:29:25.000Z | salt/cli/caller.py | JesseRhoads/salt | bd5395ea85956e064970710aae03398cbd1b20f5 | ["Apache-2.0"] | null | null | null | salt/cli/caller.py | JesseRhoads/salt | bd5395ea85956e064970710aae03398cbd1b20f5 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
'''
The caller module is used as a front-end to manage direct calls to the salt
minion modules.
'''
# Import python libs
from __future__ import print_function
import os
import sys
import logging
import datetime
import traceback
# Import salt libs
import salt.exitcodes
import salt.loader
import salt.minion
import salt.output
import salt.payload
import salt.transport
import salt.utils.args
from salt._compat import string_types
from salt.log import LOG_LEVELS
from salt.utils import print_cli
from salt.utils import kinds
log = logging.getLogger(__name__)
try:
from raet import raeting, nacling
from raet.lane.stacking import LaneStack
from raet.lane.yarding import RemoteYard
except ImportError:
# Don't die on missing transport libs since only one transport is required
pass
# Custom exceptions
from salt.exceptions import (
SaltClientError,
CommandNotFoundError,
CommandExecutionError,
SaltInvocationError,
)
class Caller(object):
'''
Factory class to create salt-call callers for different transport
'''
@staticmethod
def factory(opts, **kwargs):
# Default to ZeroMQ for now
ttype = 'zeromq'
# determine the ttype
if 'transport' in opts:
ttype = opts['transport']
elif 'transport' in opts.get('pillar', {}).get('master', {}):
ttype = opts['pillar']['master']['transport']
# switch on available ttypes
if ttype == 'zeromq':
return ZeroMQCaller(opts, **kwargs)
elif ttype == 'raet':
return RAETCaller(opts, **kwargs)
else:
raise Exception('Callers are only defined for ZeroMQ and raet')
# return NewKindOfCaller(opts, **kwargs)
class ZeroMQCaller(object):
'''
Object to wrap the calling of local salt modules for the salt-call command
'''
def __init__(self, opts):
'''
Pass in the command line options
'''
self.opts = opts
self.opts['caller'] = True
self.serial = salt.payload.Serial(self.opts)
# Handle this here so other deeper code which might
# be imported as part of the salt api doesn't do a
# nasty sys.exit() and tick off our developer users
try:
self.minion = salt.minion.SMinion(opts)
except SaltClientError as exc:
raise SystemExit(str(exc))
def call(self):
'''
Call the module
'''
ret = {}
fun = self.opts['fun']
ret['jid'] = '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now())
proc_fn = os.path.join(
salt.minion.get_proc_dir(self.opts['cachedir']),
ret['jid']
)
if fun not in self.minion.functions:
sys.stderr.write('Function {0} is not available.'.format(fun))
mod_name = fun.split('.')[0]
if mod_name in self.minion.function_errors:
sys.stderr.write(' Possible reasons: {0}\n'.format(self.minion.function_errors[mod_name]))
else:
sys.stderr.write('\n')
sys.exit(-1)
try:
sdata = {
'fun': fun,
'pid': os.getpid(),
'jid': ret['jid'],
'tgt': 'salt-call'}
args, kwargs = salt.minion.load_args_and_kwargs(
self.minion.functions[fun],
salt.utils.args.parse_input(self.opts['arg']),
data=sdata)
try:
with salt.utils.fopen(proc_fn, 'w+b') as fp_:
fp_.write(self.serial.dumps(sdata))
except NameError:
# Don't require msgpack with local
pass
except IOError:
sys.stderr.write(
'Cannot write to process directory. '
'Do you have permissions to '
'write to {0} ?\n'.format(proc_fn))
func = self.minion.functions[fun]
try:
ret['return'] = func(*args, **kwargs)
except TypeError as exc:
trace = traceback.format_exc()
sys.stderr.write('Passed invalid arguments: {0}\n'.format(exc))
active_level = LOG_LEVELS.get(
self.opts['log_level'].lower(), logging.ERROR)
if active_level <= logging.DEBUG:
sys.stderr.write(trace)
sys.exit(salt.exitcodes.EX_GENERIC)
try:
ret['retcode'] = sys.modules[
func.__module__].__context__.get('retcode', 0)
except AttributeError:
ret['retcode'] = 1
except (CommandExecutionError) as exc:
msg = 'Error running \'{0}\': {1}\n'
active_level = LOG_LEVELS.get(
self.opts['log_level'].lower(), logging.ERROR)
if active_level <= logging.DEBUG:
sys.stderr.write(traceback.format_exc())
sys.stderr.write(msg.format(fun, str(exc)))
sys.exit(salt.exitcodes.EX_GENERIC)
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found: {1}\n'
sys.stderr.write(msg.format(fun, str(exc)))
sys.exit(salt.exitcodes.EX_GENERIC)
try:
os.remove(proc_fn)
except (IOError, OSError):
pass
if hasattr(self.minion.functions[fun], '__outputter__'):
oput = self.minion.functions[fun].__outputter__
if isinstance(oput, string_types):
ret['out'] = oput
is_local = self.opts['local'] or self.opts.get(
'file_client', False) == 'local'
returners = self.opts.get('return', '').split(',')
if (not is_local) or returners:
ret['id'] = self.opts['id']
ret['fun'] = fun
ret['fun_args'] = self.opts['arg']
for returner in returners:
try:
ret['success'] = True
self.minion.returners['{0}.returner'.format(returner)](ret)
except Exception:
pass
# return the job infos back up to the respective minion's master
if not is_local:
try:
mret = ret.copy()
mret['jid'] = 'req'
self.return_pub(mret)
except Exception:
pass
# close raet channel here
return ret
def return_pub(self, ret):
'''
Return the data up to the master
'''
channel = salt.transport.Channel.factory(self.opts, usage='salt_call')
load = {'cmd': '_return', 'id': self.opts['id']}
for key, value in ret.items():
load[key] = value
channel.send(load)
def print_docs(self):
'''
Pick up the documentation for all of the modules and print it out.
'''
docs = {}
for name, func in self.minion.functions.items():
if name not in docs:
if func.__doc__:
docs[name] = func.__doc__
for name in sorted(docs):
if name.startswith(self.opts.get('fun', '')):
print_cli('{0}:\n{1}\n'.format(name, docs[name]))
def print_grains(self):
'''
Print out the grains
'''
grains = salt.loader.grains(self.opts)
salt.output.display_output({'local': grains}, 'grains', self.opts)
def run(self):
'''
Execute the salt call logic
'''
try:
ret = self.call()
out = ret.get('out', 'nested')
if self.opts['metadata']:
print_ret = ret
out = 'nested'
else:
print_ret = ret.get('return', {})
salt.output.display_output(
{'local': print_ret},
out,
self.opts)
if self.opts.get('retcode_passthrough', False):
sys.exit(ret['retcode'])
except SaltInvocationError as err:
raise SystemExit(err)
class RAETCaller(ZeroMQCaller):
'''
Object to wrap the calling of local salt modules for the salt-call command
when transport is raet
'''
def __init__(self, opts):
'''
Pass in the command line options
'''
stack, estatename, yardname = self._setup_caller_stack(opts)
self.stack = stack
salt.transport.jobber_stack = self.stack
#salt.transport.jobber_estate_name = estatename
#salt.transport.jobber_yard_name = yardname
super(RAETCaller, self).__init__(opts)
def run(self):
'''
Execute the salt call logic
'''
try:
ret = self.call()
self.stack.server.close()
salt.transport.jobber_stack = None
if self.opts['metadata']:
print_ret = ret
else:
print_ret = ret.get('return', {})
salt.output.display_output(
{'local': print_ret},
ret.get('out', 'nested'),
self.opts)
if self.opts.get('retcode_passthrough', False):
sys.exit(ret['retcode'])
except SaltInvocationError as err:
raise SystemExit(err)
def _setup_caller_stack(self, opts):
'''
Setup and return the LaneStack and Yard used by by channel when global
not already setup such as in salt-call to communicate to-from the minion
'''
role = opts.get('id')
if not role:
emsg = ("Missing role required to setup RAETChannel.")
log.error(emsg + "\n")
raise ValueError(emsg)
kind = opts.get('__role') # application kind 'master', 'minion', etc
if kind not in kinds.APPL_KINDS:
emsg = ("Invalid application kind = '{0}' for RAETChannel.".format(kind))
log.error(emsg + "\n")
raise ValueError(emsg)
if kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.minion],
kinds.APPL_KIND_NAMES[kinds.applKinds.caller]]:
lanename = "{0}_{1}".format(role, kind)
else:
emsg = ("Unsupported application kind '{0}' for RAETChannel.".format(kind))
log.error(emsg + '\n')
raise ValueError(emsg)
sockdirpath = opts['sock_dir']
stackname = 'caller' + nacling.uuid(size=18)
stack = LaneStack(name=stackname,
lanename=lanename,
sockdirpath=sockdirpath)
stack.Pk = raeting.packKinds.pack
stack.addRemote(RemoteYard(stack=stack,
name='manor',
lanename=lanename,
dirpath=sockdirpath))
log.debug("Created Caller Jobber Stack {0}\n".format(stack.name))
# name of Road Estate for this caller
estatename = "{0}_{1}".format(role, kind)
# name of Yard for this caller
yardname = stack.local.name
# return identifiers needed to route back to this callers master
return (stack, estatename, yardname)
def _setup_caller(self, opts):
'''
Setup up RaetCaller stacks and behaviors
Essentially a subset of a minion whose only function is to perform
Salt-calls with raet as the transport
The essentials:
A RoadStack whose local estate name is of the form "role_kind" where:
role is the minion id opts['id']
kind is opts['__role'] which should be 'caller' APPL_KIND_NAMES
The RoadStack if for communication to/from a master
A LaneStack with manor yard so that RaetChannels created by the func Jobbers
can communicate through this manor yard then through the
RoadStack to/from a master
A Router to route between the stacks (Road and Lane)
These are all managed via a FloScript named caller.flo
'''
pass
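# Illustrative usage sketch (assumes the salt-call front end supplies a parsed `opts`
# mapping; not taken from this file):
#
#     caller = Caller.factory(opts)   # picks ZeroMQCaller or RAETCaller from opts['transport']
#     caller.run()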
| 34.370056 | 106 | 0.552396 |
7946cd55b64e51763b83ef66b394e961275ca224 | 579 | py | Python | src/multilingual/context_processors.py | daniel-werner/stelagifts | 809cb70f98a1ead3acbc2fd6aea99a87fdfd9435 | ["MIT"] | null | null | null | src/multilingual/context_processors.py | daniel-werner/stelagifts | 809cb70f98a1ead3acbc2fd6aea99a87fdfd9435 | ["MIT"] | null | null | null | src/multilingual/context_processors.py | daniel-werner/stelagifts | 809cb70f98a1ead3acbc2fd6aea99a87fdfd9435 | ["MIT"] | null | null | null |
from multilingual.languages import get_language_code_list, get_default_language_code
from multilingual.settings import LANG_DICT
from django.conf import settings
def multilingual(request):
"""
Returns context variables containing information about available languages.
"""
codes = sorted(get_language_code_list())
return {'LANGUAGE_CODES': codes,
'LANGUAGE_CODES_AND_NAMES': [(c, LANG_DICT.get(c, c)) for c in codes],
'DEFAULT_LANGUAGE_CODE': get_default_language_code(),
'ADMIN_MEDIA_URL': settings.ADMIN_MEDIA_PREFIX}
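# Illustrative wiring sketch (assumption - the settings belong to the project, not this
# file): a context processor only runs when listed in the template context processors,
# e.g. with the old-style setting used by Django of this vintage:
#
#     TEMPLATE_CONTEXT_PROCESSORS = (
#         'django.core.context_processors.request',
#         'multilingual.context_processors.multilingual',
#     )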
| 38.6 | 84 | 0.740933 |
7946cd9e3c1d170db9f3c449fc30a4c10ff631e4 | 3,458 | py | Python | dcnow.py | sairuk/dreampi | 8aba6801059433b93bb2fc44e29fc4e0b44e03e9 | ["MIT"] | null | null | null | dcnow.py | sairuk/dreampi | 8aba6801059433b93bb2fc44e29fc4e0b44e03e9 | ["MIT"] | null | null | null | dcnow.py | sairuk/dreampi | 8aba6801059433b93bb2fc44e29fc4e0b44e03e9 | ["MIT"] | 1 | 2019-03-23T09:34:50.000Z | 2019-03-23T09:34:50.000Z |
#!/usr/bin/env python3
import threading
import os
import json
import time
import logging
import logging.handlers  # SysLogHandler (used below) lives in the handlers submodule
import sh
# Try the Python 2 name (urllib2) first, then fall back to Python 3's urllib.request
try:
import urllib2 as urlh
except ImportError:
import urllib.request as urlh
try:
import urllib.parse as urlp
except:
import urllib as urlp
from hashlib import sha256
from uuid import getnode as get_mac
logger = logging.getLogger('dcnow')
API_ROOT = "https://dcnow-2016.appspot.com"
UPDATE_END_POINT = "/api/update/{mac_address}/"
UPDATE_INTERVAL = 15
CONFIGURATION_FILE = os.path.expanduser("~/.dreampi.json")
def scan_mac_address():
mac = get_mac()
return sha256(':'.join(("%012X" % mac)[i:i+2] for i in range(0, 12, 2)).encode('utf-8')).hexdigest()
class DreamcastNowThread(threading.Thread):
def __init__(self, service):
self._service = service
self._running = True
super(DreamcastNowThread, self).__init__()
def run(self):
def post_update():
if not self._service._enabled:
return
lines = [ x for x in sh.tail("/var/log/syslog", "-n", "10", _iter=True) ]
dns_query = None
for line in lines[::-1]:
if "CONNECT" in line and "dreampi" in line:
# Don't seek back past connection
break
if "query[A]" in line:
# We did a DNS lookup, what was it?
remainder = line[line.find("query[A]") + len("query[A]"):].strip()
domain = remainder.split(" ", 1)[0].strip()
                    dns_query = sha256(domain.encode('utf-8')).hexdigest()
break
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT), Dreamcast Now'
header = { 'User-Agent' : user_agent }
mac_address = self._service._mac_address
data = {}
if dns_query:
data["dns_query"] = dns_query
            data = urlp.urlencode(data).encode('utf-8')  # urlopen expects bytes for a POST body
            req = urlh.Request(API_ROOT + UPDATE_END_POINT.format(mac_address=mac_address), data, header)
            urlh.urlopen(req) # Send POST update
while self._running:
try:
post_update()
except:
logger.exception("Couldn't update Dreamcast Now!")
time.sleep(UPDATE_INTERVAL)
def stop(self):
self._running = False
self.join()
class DreamcastNowService(object):
def __init__(self):
self._thread = None
self._mac_address = None
self._enabled = True
self.reload_settings()
logger.setLevel(logging.INFO)
handler = logging.handlers.SysLogHandler(address='/dev/log')
logger.addHandler(handler)
def update_mac_address(self, dreamcast_ip):
self._mac_address = scan_mac_address()
logger.info("MAC address: {}".format(self._mac_address))
def reload_settings(self):
settings_file = CONFIGURATION_FILE
if os.path.exists(settings_file):
with open(settings_file, "r") as settings:
content = json.loads(settings.read())
self._enabled = content["enabled"]
def go_online(self, dreamcast_ip):
if not self._enabled:
return
self.update_mac_address(dreamcast_ip)
self._thread = DreamcastNowThread(self)
self._thread.start()
def go_offline(self):
self._thread.stop()
self._thread = None
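# Illustrative usage sketch (assumes the dreampi daemon drives this service around a
# Dreamcast PPP session; not taken from this file):
#
#     service = DreamcastNowService()
#     service.go_online(dreamcast_ip)   # dreamcast_ip supplied by the caller
#     ...                               # while the console stays connected, the thread posts updates
#     service.go_offline()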
| 28.578512 | 104 | 0.593985 |
7946ce3f7f98959e1db957cffd5879b5c089d44e | 1,963 | py | Python | tmt/steps/prepare/shell.py | pkis/tmt | 66fa6417bb5483fd3a432f008e863f6ad9003501 | ["MIT"] | 2 | 2022-03-04T05:57:48.000Z | 2022-03-09T18:49:02.000Z | tmt/steps/prepare/shell.py | pkis/tmt | 66fa6417bb5483fd3a432f008e863f6ad9003501 | ["MIT"] | 91 | 2022-02-15T14:09:03.000Z | 2022-03-31T16:05:28.000Z | tmt/steps/prepare/shell.py | pkis/tmt | 66fa6417bb5483fd3a432f008e863f6ad9003501 | ["MIT"] | 5 | 2022-02-21T08:02:23.000Z | 2022-03-26T16:46:46.000Z |
import click
import fmf
import tmt
import tmt.utils
class PrepareShell(tmt.steps.prepare.PreparePlugin):
"""
Prepare guest using shell scripts
Example config:
prepare:
how: shell
script:
- sudo dnf install -y 'dnf-command(copr)'
- sudo dnf copr enable -y psss/tmt
- sudo dnf install -y tmt
Use 'order' attribute to select in which order preparation should
happen if there are multiple configs. Default order is '50'.
Default order of required packages installation is '70'.
"""
# Supported methods
_methods = [tmt.steps.Method(name='shell', doc=__doc__, order=50)]
# Supported keys
_keys = ["script"]
@classmethod
def options(cls, how=None):
""" Prepare command line options """
return [
click.option(
'-s', '--script', metavar='SCRIPT',
help='Shell script to be executed.')
] + super().options(how)
def default(self, option, default=None):
""" Return default data for given option """
if option == 'script':
return []
return default
def wake(self, keys=None):
""" Wake up the plugin, process data, apply options """
super().wake(keys=keys)
# Convert to list if single script provided
tmt.utils.listify(self.data, keys=['script'])
def go(self, guest):
""" Prepare the guests """
super().go(guest)
# Give a short summary
scripts = self.get('script')
overview = fmf.utils.listed(scripts, 'script')
self.info('overview', f'{overview} found', 'green')
# Execute each script on the guest (with default shell options)
for script in scripts:
self.verbose('script', script, 'green')
script_with_options = f'{tmt.utils.SHELL_OPTIONS}; {script}'
guest.execute(script_with_options, cwd=self.step.plan.worktree)
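# Note on the loop above: each user script is prefixed with tmt's default shell options
# (tmt.utils.SHELL_OPTIONS) and run in the plan's worktree on the guest via guest.execute().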
| 28.867647 | 75 | 0.599083 |
7946ce633331354ca9497b9f24c80f10a3b1734d | 213 | py | Python | c-series/c015.py | TheLurkingCat/ZeroJudge | 6fc49c54a45e2b4b3a8d04b7a5a1fc81a2ff4eee | ["MIT"] | 1 | 2018-10-21T10:03:42.000Z | 2018-10-21T10:03:42.000Z | c-series/c015.py | TheLurkingCat/ZeroJudge | 6fc49c54a45e2b4b3a8d04b7a5a1fc81a2ff4eee | ["MIT"] | null | null | null | c-series/c015.py | TheLurkingCat/ZeroJudge | 6fc49c54a45e2b4b3a8d04b7a5a1fc81a2ff4eee | ["MIT"] | 2 | 2018-10-12T16:40:11.000Z | 2021-04-05T12:05:36.000Z |
a = int(input())
for _ in range(a):
x = input()
y = x[::-1]
counter = 0
while x != y or not counter:
x = str(int(x) + int(y))
y = x[::-1]
counter += 1
print(counter, x)
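# The loop above solves the classic "reverse and add" exercise: keep adding the number to
# its own digit-reversal until a palindrome appears, then print how many additions were
# needed and the resulting palindrome; `or not counter` forces at least one addition even
# when the input is already palindromic.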
| 19.363636 | 32 | 0.43662 |
7946cf7921705a2305b6f5a228f1cf14353b33ba | 3,811 | py | Python | 14A-235/HI/imaging/ebhis_regrid_m31.py | Astroua/LocalGroup-VLA | 4920341c9e25343d724fb4a2e37cdcd234201047 | [
"MIT"
] | 1 | 2019-04-11T00:37:56.000Z | 2019-04-11T00:37:56.000Z | 14A-235/HI/imaging/ebhis_regrid_m31.py | Astroua/LocalGroup-VLA | 4920341c9e25343d724fb4a2e37cdcd234201047 | [
"MIT"
] | null | null | null | 14A-235/HI/imaging/ebhis_regrid_m31.py | Astroua/LocalGroup-VLA | 4920341c9e25343d724fb4a2e37cdcd234201047 | [
"MIT"
] | null | null | null |
'''
Regrid the EBHIS data to match the 14A footprint.
'''
from astropy.io import fits
from astropy.wcs import WCS
from spectral_cube import SpectralCube
from astropy.utils.console import ProgressBar
import numpy as np
import os
import astropy.units as u
from cube_analysis.io_utils import create_huge_fits
from paths import (fourteenA_HI_data_path,
ebhis_m31_HI_data_path,
m31_data_path)
from constants import hi_freq
osjoin = os.path.join
def vel_to_freq(vel_or_freq, rest_freq=hi_freq,
unit=u.Hz):
'''
Using radio velocity here.
'''
equiv = u.doppler_radio(rest_freq)
return vel_or_freq.to(unit, equiv)
run_04kms = True
ebhis_outpath = ebhis_m31_HI_data_path('14A-235_items', no_check=True)
if not os.path.exists(ebhis_outpath):
os.mkdir(ebhis_outpath)
ebhis_name = "CAR_C01.fits"
cube = SpectralCube.read(ebhis_m31_HI_data_path(ebhis_name))
if run_04kms:
out_name = "CAR_C01_14A235_match_04kms.fits"
out_name_specregrid = "CAR_C01_14A235_match_04kms_spectralregrid.fits"
# We require the spatial pb mask and a saved FITS header that defines the
# spatial WCS information of the VLA cube
vla_pbmask = fits.getdata(fourteenA_HI_data_path("14A_spatial_pbmask.fits")) > 0
vla_spat_hdr = fits.Header.fromtextfile(fourteenA_HI_data_path("14A_spatial_header.txt"))
# Hard code in properties to make the spectral axis
# Define axis in frequency. Then convert to V_rad
freq_0 = 1420433643.3212132 * u.Hz
# This cube averages over 5 channels
del_freq = 1952.9365057945251 * u.Hz
nchan = 1526
freq_axis = np.arange(nchan) * del_freq + freq_0
vel_axis = vel_to_freq(freq_axis, unit=u.m / u.s)
save_name = osjoin(ebhis_outpath, out_name_specregrid)
# Spectral interpolation, followed by reprojection.
if not os.path.exists(save_name):
cube = cube.spectral_interpolate(vel_axis)
if cube._is_huge:
output_fits = create_huge_fits(save_name, cube.header,
return_hdu=True)
for chan in ProgressBar(cube.shape[0]):
output_fits[0].data[chan] = cube[chan].value
output_fits.flush()
output_fits.close()
else:
cube.write(save_name, overwrite=True)
else:
cube = SpectralCube.read(save_name)
# Make the reprojected header
new_header = cube.header.copy()
new_header["NAXIS"] = 3
new_header["NAXIS1"] = vla_pbmask.shape[1]
new_header["NAXIS2"] = vla_pbmask.shape[0]
new_header["NAXIS3"] = nchan
new_header['CRVAL3'] = vel_axis[0].value
# COMMENT is in here b/c casa adds an illegal comment format
kwarg_skip = ['TELESCOP', 'BUNIT', 'INSTRUME', 'COMMENT']
for key in cube.header:
if key == 'HISTORY':
continue
if key in vla_spat_hdr:
if "NAXIS" in key:
continue
if key in kwarg_skip:
continue
new_header[key] = vla_spat_hdr[key]
new_header.update(cube.beam.to_header_keywords())
new_header["BITPIX"] = -32
# Build up the reprojected cube per channel
save_name = osjoin(ebhis_outpath, out_name)
output_fits = create_huge_fits(save_name, new_header, return_hdu=True)
targ_header = WCS(vla_spat_hdr).celestial.to_header()
targ_header["NAXIS"] = 2
targ_header["NAXIS1"] = vla_pbmask.shape[1]
targ_header["NAXIS2"] = vla_pbmask.shape[0]
for chan in ProgressBar(cube.shape[0]):
reproj_chan = \
cube[chan].reproject(targ_header).value.astype(np.float32)
output_fits[0].data[chan] = reproj_chan
if chan % 100 == 0:
output_fits.flush()
output_fits.close()
del output_fits
del cube
| 29.773438 | 93 | 0.672264 |
7946cf8c7f3eb96ef44a8f0c9536655228aabbf3 | 6,629 | py | Python | .modules/.CMSeeK/cmsbrute/joom.py | termux-one/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 1,103 | 2018-04-20T14:08:11.000Z | 2022-03-29T06:22:43.000Z | .modules/.CMSeeK/cmsbrute/joom.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 29 | 2019-04-03T14:52:38.000Z | 2022-03-24T12:33:05.000Z | .modules/.CMSeeK/cmsbrute/joom.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 161 | 2018-04-20T15:57:12.000Z | 2022-03-15T19:16:16.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# This is a part of CMSeeK, check the LICENSE file for more information
# Copyright (c) 2018 Tuhinshubhra
### Joomla Bruteforce module
### Version 1.3
### This thing took a whole freaking night to build... apperently i was dealing with the cookies in a not so "Wise" manner!
### cmseekbruteforcemodule <- make sure you include this comment in any custom modules you create so that cmseek can recognize it as a part of it's module
import cmseekdb.basic as cmseek
import cmseekdb.sc as source # Contains function to detect cms from source code
import cmseekdb.header as header # Contains function to detect CMS from gathered http headers
import cmseekdb.generator as generator
import multiprocessing ## Let's speed things up a lil bit (actually a hell lot faster) shell we?
from functools import partial ## needed somewhere :/
import sys
import cmseekdb.generator as generator
import re
import urllib.request, urllib.error, urllib.parse
import http.cookiejar
from html.parser import HTMLParser
class extInpTags(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.return_array = {}
def handle_starttag(self, tag, attrs):
if tag == "input":
name = None
value = None
for nm,val in attrs:
if nm == "name":
name = val
if nm == "value":
value = val
if name is not None and value is not None:
self.return_array.update({name:value})
def testlogin(url,user,passw):
url = url + '/administrator/index.php'
cj = http.cookiejar.FileCookieJar("cookieszz")
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
joomloginsrc = opener.open(url).read().decode()
parser = extInpTags()
post_array = parser.feed(joomloginsrc)
main_param = {'username':user, 'passwd':passw}
other_param = parser.return_array
post_data = main_param.copy()
post_data.update(other_param)
post_datad = urllib.parse.urlencode(post_data).encode("utf-8")
ua = cmseek.randomua('generatenewuaeverytimetobesafeiguess')
try:
with opener.open(url, post_datad) as response:
scode = response.read().decode()
headers = str(response.info())
rurl = response.geturl()
r = ['1', scode, headers, rurl] ## 'success code', 'source code', 'http headers', 'redirect url'
return r
except Exception as e:
e = str(e)
r = ['2', e, '', ''] ## 'error code', 'error message', 'empty'
return r
def start():
cmseek.clearscreen()
cmseek.banner("Joomla Bruteforce Module")
url = cmseek.targetinp("") # input('Enter Url: ')
cmseek.info("Checking for Joomla")
bsrc = cmseek.getsource(url, cmseek.randomua('foodislove'))
joomcnf = '0'
if bsrc[0] != '1':
cmseek.error("Could not get target source, CMSeek is quitting")
cmseek.handle_quit()
else:
## Parse generator meta tag
parse_generator = generator.parse(bsrc[1])
ga = parse_generator[0]
ga_content = parse_generator[1]
try1 = generator.scan(ga_content)
if try1[0] == '1' and try1[1] == 'joom':
joomcnf = '1'
else:
try2 = source.check(bsrc[1], url)
if try2[0] == '1' and try2[1] == 'joom':
joomcnf = '1'
else:
try3 = header.check(bsrc[2]) # Headers Check!
if try3[0] == '1' and try3[1] == 'joom':
joomcnf = '1'
else:
joomcnf = '0'
if joomcnf != '1':
cmseek.error('Could not confirm Joomla... CMSeek is quitting')
cmseek.handle_quit()
else:
cmseek.success("Joomla Confirmed... Confirming form and getting token...")
joomloginsrc = cmseek.getsource(url + '/administrator/index.php', cmseek.randomua('thatsprettygay'))
if joomloginsrc[0] == '1' and '<form' in joomloginsrc[1]:
# joomtoken = re.findall(r'type=\"hidden\" name=\"(.*?)\" value=\"1\"', joomloginsrc[1])
# if len(joomtoken) == 0:
# cmseek.error('Unable to get token... CMSeek is quitting!')
# cmseek.handle_quit()
# cmseek.success("Token grabbed successfully: " + cmseek.bold + joomtoken[0] + cmseek.cln)
# token = joomtoken[0]
joomparamuser = []
rawuser = input("[~] Enter Usernames with coma as separation without any space (example: cris,harry): ").split(',')
for rusr in rawuser:
joomparamuser.append(rusr)
joombruteusers = set(joomparamuser) ## Strip duplicate usernames in case any smartass didn't read the full thing and entered admin as well
for user in joombruteusers:
passfound = '0'
print('\n')
cmseek.info("Bruteforcing User: " + cmseek.bold + user + cmseek.cln)
pwd_file = open("wordlist/passwords.txt", "r")
passwords = pwd_file.read().split('\n')
passwords.insert(0, user)
for password in passwords:
if password != '' and password != '\n':
sys.stdout.write('[*] Testing Password: ')
sys.stdout.write('%s\r\r' % password)
sys.stdout.flush()
# print("Testing Pass: " + password)
cursrc = testlogin(url, user, password)
# print('Token: ' + token)
# print("Ret URL: " + str(cursrc[3]))
if 'logout' in str(cursrc[1]):
print('\n')
cmseek.success('Password found!')
print(" |\n |--[username]--> " + cmseek.bold + user + cmseek.cln + "\n |\n |--[password]--> " + cmseek.bold + password + cmseek.cln + "\n |")
cmseek.success('Enjoy The Hunt!')
cmseek.savebrute(url,url + '/administrator/index.php',user,password)
passfound = '1'
break
else:
continue
break
if passfound == '0':
cmseek.error('\n\nCould Not find Password!')
print('\n\n')
else:
cmseek.error("Couldn't find login form... CMSeeK is quitting")
cmseek.handle_quit()
| 44.193333 | 169 | 0.559964 |
7946d095939bc8bd45fc2c84022a1475a3f77ace | 2,071 | py | Python | krux_redis/sentinel.py | krux/python-krux-redis | a7b1723a21001ec5d67271f170bc6b1ea6e7f98e | [
"MIT"
] | null | null | null | krux_redis/sentinel.py | krux/python-krux-redis | a7b1723a21001ec5d67271f170bc6b1ea6e7f98e | [
"MIT"
] | null | null | null | krux_redis/sentinel.py | krux/python-krux-redis | a7b1723a21001ec5d67271f170bc6b1ea6e7f98e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# © 2017 Krux, A Salesforce Company
#
from __future__ import absolute_import
from redis.sentinel import ConnectionError, Sentinel
from krux.cli import Application
from krux_redis.cli import add_sentinel_cli_arguments
SEPARATOR = ':'
SOCKET_TIMEOUT = 0.1
def hostport_from_string(spec):
"""
Given SPEC, a string specifying a HOST:PORT pair, return a tuple
of (HOST, PORT).
"""
return tuple(spec.split(SEPARATOR))
def hostports_from_args(args):
"""
Given a Namespace object (parsed CLI options), return a list of
(HOST, PORT) pairs specified by the Namespace.
"""
return [hostport_from_string(s) for s in getattr(args, 'sentinel', [])]
class TestApplication(Application):
def __init__(self):
# Call to the superclass to bootstrap.
super(TestApplication, self).__init__(name='krux-redis-sentinel')
self.logger.debug('Parsing sentinels from args: %r', self.args.sentinel)
sentinels = hostports_from_args(self.args)
self.logger.debug('Parsed sentinel host, port pairs: %r', sentinels)
self.logger.debug('Initializing Sentinel instance...')
self.sentinel = Sentinel(sentinels, socket_timeout=SOCKET_TIMEOUT)
self.logger.debug('Initialized Sentinel instance: %r', self.sentinel)
def add_cli_arguments(self, parser):
add_sentinel_cli_arguments(parser)
def run(self):
master = self.sentinel.master_for(self.name)
slave = self.sentinel.slave_for(self.name)
try:
self.logger.info('Ping master: %s', master.ping())
except ConnectionError as err:
self.logger.warning('Could not connect to master!')
self.logger.error(err)
try:
self.logger.info('Ping slave: %s', slave.ping())
except ConnectionError as err:
self.logger.warning('Could not connect to slave!')
self.logger.error(err)
def main():
TestApplication().run()
# Run the application stand alone
if __name__ == '__main__':
main()
| 27.986486 | 80 | 0.668759 |
7946d0ca8d02f13664bad960a24f0f16e4be5b1b | 1,583 | py | Python | fn_pastebin/fn_pastebin/util/customize.py | lmahoney1/resilient-community-apps | 1f60fb100e6a697df7b901d7a4aad707fea3dfee | [
"MIT"
] | null | null | null | fn_pastebin/fn_pastebin/util/customize.py | lmahoney1/resilient-community-apps | 1f60fb100e6a697df7b901d7a4aad707fea3dfee | [
"MIT"
] | null | null | null | fn_pastebin/fn_pastebin/util/customize.py | lmahoney1/resilient-community-apps | 1f60fb100e6a697df7b901d7a4aad707fea3dfee | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Generate the Resilient customizations required for fn_pastebin"""
import base64
import os
import io
try:
from resilient import ImportDefinition
except ImportError:
# Support Apps running on resilient-circuits < v35.0.195
from resilient_circuits.util import ImportDefinition
RES_FILE = "data/export.res"
def codegen_reload_data():
"""
Parameters required reload codegen for the fn_pastebin package
"""
return {
"package": u"fn_pastebin",
"message_destinations": [u"fn_pastebin"],
"functions": [u"fn_create_pastebin"],
"workflows": [u"example_create_pastebin"],
"actions": [u"Create Pastebin"],
"incident_fields": [],
"incident_artifact_types": [],
"datatables": [],
"automatic_tasks": [],
"scripts": []
}
def customization_data(client=None):
"""
Returns a Generator of ImportDefinitions (Customizations).
Install them using `resilient-circuits customize`
IBM Resilient Platform Version: 36.0.5634
Contents:
- Message Destinations:
- fn_pastebin
- Functions:
- fn_create_pastebin
- Workflows:
- example_create_pastebin
- Rules:
- Create Pastebin
"""
res_file = os.path.join(os.path.dirname(__file__), RES_FILE)
if not os.path.isfile(res_file):
raise FileNotFoundError("{} not found".format(RES_FILE))
with io.open(res_file, mode='rt') as f:
b64_data = base64.b64encode(f.read().encode('utf-8'))
yield ImportDefinition(b64_data) | 26.830508 | 68 | 0.651927 |
7946d18dc48b97ddd1a993a15f718ef34f18fa6c | 4,816 | py | Python | tests/unit/preprocessor/_regrid/test__stock_cube.py | markelg/ESMValCore | b2f7ffc3232f174dd5ebc50ad20b4a02d3517c2c | [
"Apache-2.0"
] | 26 | 2019-06-07T07:50:07.000Z | 2022-03-22T21:04:01.000Z | tests/unit/preprocessor/_regrid/test__stock_cube.py | markelg/ESMValCore | b2f7ffc3232f174dd5ebc50ad20b4a02d3517c2c | [
"Apache-2.0"
] | 1,370 | 2019-06-06T09:03:07.000Z | 2022-03-31T04:37:20.000Z | tests/unit/preprocessor/_regrid/test__stock_cube.py | zklaus/ESMValCore | 5656fb8b546eeb4d750a424de7ed56a237edfabb | [
"Apache-2.0"
] | 26 | 2019-07-03T13:08:48.000Z | 2022-03-02T16:08:47.000Z | """
Unit tests for the :func:`esmvalcore.preprocessor.regrid._stock_cube`
function.
"""
import unittest
from unittest import mock
import iris
import numpy as np
import tests
from esmvalcore.preprocessor._regrid import (_LAT_MAX, _LAT_MIN, _LAT_RANGE,
_LON_MAX, _LON_MIN, _LON_RANGE)
from esmvalcore.preprocessor._regrid import _global_stock_cube
class Test(tests.Test):
def _check(self, dx, dy, lat_off=True, lon_off=True):
# Generate the expected stock cube coordinate points.
dx, dy = float(dx), float(dy)
mid_dx, mid_dy = dx / 2, dy / 2
if lat_off and lon_off:
expected_lat_points = np.linspace(
_LAT_MIN + mid_dy, _LAT_MAX - mid_dy, int(_LAT_RANGE / dy))
expected_lon_points = np.linspace(
_LON_MIN + mid_dx, _LON_MAX - mid_dx, int(_LON_RANGE / dx))
else:
expected_lat_points = np.linspace(_LAT_MIN, _LAT_MAX,
int(_LAT_RANGE / dy) + 1)
expected_lon_points = np.linspace(_LON_MIN, _LON_MAX - dx,
int(_LON_RANGE / dx))
# Check the stock cube coordinates.
self.assertEqual(self.mock_DimCoord.call_count, 2)
call_lats, call_lons = self.mock_DimCoord.call_args_list
# Check the latitude coordinate creation.
[args], kwargs = call_lats
self.assert_array_equal(args, expected_lat_points)
expected_lat_kwargs = dict(standard_name='latitude',
units='degrees_north',
var_name='lat',
circular=False)
self.assertEqual(kwargs, expected_lat_kwargs)
# Check the longitude coordinate creation.
[args], kwargs = call_lons
self.assert_array_equal(args, expected_lon_points)
expected_lon_kwargs = dict(standard_name='longitude',
units='degrees_east',
var_name='lon',
circular=False)
self.assertEqual(kwargs, expected_lon_kwargs)
# Check that the coordinate guess_bounds method has been called.
expected_calls = [mock.call.guess_bounds()] * 2
self.assertEqual(self.mock_coord.mock_calls, expected_calls)
# Check the stock cube creation.
self.mock_Cube.assert_called_once()
_, kwargs = self.mock_Cube.call_args
spec = [(self.mock_coord, 0), (self.mock_coord, 1)]
expected_cube_kwargs = dict(dim_coords_and_dims=spec)
self.assertEqual(kwargs, expected_cube_kwargs)
# Reset the mocks to enable multiple calls per test-case.
for mocker in self.mocks:
mocker.reset_mock()
def setUp(self):
self.Cube = mock.sentinel.Cube
self.mock_Cube = self.patch('iris.cube.Cube', return_value=self.Cube)
self.mock_coord = mock.Mock(spec=iris.coords.DimCoord)
self.mock_DimCoord = self.patch(
'iris.coords.DimCoord', return_value=self.mock_coord)
self.mocks = [self.mock_Cube, self.mock_coord, self.mock_DimCoord]
def test_invalid_cell_spec__alpha(self):
emsg = 'Invalid MxN cell specification'
with self.assertRaisesRegex(ValueError, emsg):
_global_stock_cube('Ax1')
def test_invalid_cell_spec__separator(self):
emsg = 'Invalid MxN cell specification'
with self.assertRaisesRegex(ValueError, emsg):
_global_stock_cube('1y1')
def test_invalid_cell_spec__longitude(self):
emsg = 'Invalid longitude delta in MxN cell specification'
with self.assertRaisesRegex(ValueError, emsg):
_global_stock_cube('1.3x1')
def test_invalid_cell_spec__latitude(self):
emsg = 'Invalid latitude delta in MxN cell specification'
with self.assertRaisesRegex(ValueError, emsg):
_global_stock_cube('1x2.3')
def test_specs(self):
specs = ['0.5x0.5', '1x1', '2.5x2.5', '5x5', '10x10']
for spec in specs:
result = _global_stock_cube(spec)
self.assertEqual(result, self.Cube)
self._check(*list(map(float, spec.split('x'))))
def test_specs_no_offset(self):
specs = ['0.5x0.5', '1x1', '2.5x2.5', '5x5', '10x10']
for spec in specs:
result = _global_stock_cube(spec,
lat_offset=False,
lon_offset=False)
self.assertEqual(result, self.Cube)
self._check(
*list(map(float, spec.split('x'))),
lat_off=False,
lon_off=False)
if __name__ == '__main__':
unittest.main()
| 39.47541 | 77 | 0.603405 |
7946d21359bfccd19b277796022012201f672ce9 | 1,943 | py | Python | avwx_account/views/token.py | flyinactor91/AVWX-Account | 29f3b9226699243966f9c7b041e94773c79d0314 | [
"MIT"
] | 1 | 2019-09-14T02:20:04.000Z | 2019-09-14T02:20:04.000Z | avwx_account/views/token.py | flyinactor91/AVWX-Account | 29f3b9226699243966f9c7b041e94773c79d0314 | [
"MIT"
] | null | null | null | avwx_account/views/token.py | flyinactor91/AVWX-Account | 29f3b9226699243966f9c7b041e94773c79d0314 | [
"MIT"
] | 1 | 2019-03-23T09:34:50.000Z | 2019-03-23T09:34:50.000Z | """
Token management views
"""
# library
from flask import flash, redirect, render_template, request, url_for
from flask_user import login_required, current_user
# app
from avwx_account import app
@app.route("/token/new")
@login_required
def new_token():
if current_user.new_token():
current_user.save()
else:
flash("Your account has been disabled. Contact [email protected]", "error")
return redirect(url_for("manage"))
@app.route("/token/edit", methods=["GET", "POST"])
@login_required
def edit_token():
token = current_user.get_token(request.args.get("value"))
if token is None:
flash("Token not found in your account", "error")
return redirect(url_for("manage"))
if request.method == "POST":
if current_user.update_token(
token.value,
name=request.form.get("name", "App"),
active=bool(request.form.get("active")),
):
current_user.save()
return redirect(url_for("manage"))
flash("Your token was not able to be updated", "error")
return render_template("edit_token.html", token=token)
@app.route("/token/refresh")
@login_required
def refresh_token():
token = current_user.get_token(request.args.get("value"))
if token is None:
flash("Token not found in your account", "error")
return redirect(url_for("manage"))
current_user.refresh_token(token.value)
current_user.save()
return redirect(url_for("manage"))
@app.route("/token/delete")
@login_required
def delete_token():
token = current_user.get_token(request.args.get("value"))
if token is None:
flash("Token not found in your account", "error")
elif token.type == "dev":
flash("Cannot delete a development token. Disable instead", "error")
else:
current_user.remove_token_by(value=token.value)
current_user.save()
return redirect(url_for("manage"))
| 29.439394 | 81 | 0.664951 |
7946d24dccf22d442eae14407c544ec50c0e0e8c | 4,306 | py | Python | core/pycopia/OS/Linux/sequencer.py | kdart/pycopia3 | 8a7c820f096245411eabbb72345e4f30a35988b6 | [
"Apache-2.0"
] | 3 | 2018-11-26T15:00:20.000Z | 2022-01-28T23:17:58.000Z | core/pycopia/OS/Linux/sequencer.py | kdart/pycopia3 | 8a7c820f096245411eabbb72345e4f30a35988b6 | [
"Apache-2.0"
] | null | null | null | core/pycopia/OS/Linux/sequencer.py | kdart/pycopia3 | 8a7c820f096245411eabbb72345e4f30a35988b6 | [
"Apache-2.0"
] | 1 | 2018-11-26T15:00:21.000Z | 2018-11-26T15:00:21.000Z | #!/usr/bin/python3.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A sequencer for running various object at different time periods, starts, and
durations.
"""
from time import monotonic
from heapq import heappop, heappush
from pycopia.timers import FDTimer, CLOCK_MONOTONIC
from pycopia import asyncio
class PeriodicTask(asyncio.PollerInterface):
def __init__(self, callback, period=0.0, delay=0.0, callback_args=()):
self._callback = callback
self._args = callback_args
self._period = period
self._timer = timer = FDTimer()
if delay:
timer.settime(delay, period)
else:
timer.settime(period, period)
def __del__(self):
self.close()
def fileno(self):
return self._timer.fileno()
def close(self):
self._timer.close()
def stop(self):
self._timer.settime(0.0, 0.0)
def start(self):
self._timer.settime(self._period, self._period)
def readable(self):
if self._timer.closed:
return False
value, interval = self._timer.gettime()
return not (value == 0.0 and interval == 0.0)
def read_handler(self):
count = self._timer.read()
while count > 0:
self._callback(*self._args)
count -= 1
if self._period == 0.0:
fd = self._timer.fileno()
self._timer.close()
raise asyncio.UnregisterFDNow(fd)
class Sequencer(object):
def __init__(self, poller=None):
self._expireq = []
self._poller = poller or asyncio.Poll()
self._duration_timer = FDTimer(CLOCK_MONOTONIC, nonblocking=1)
self._poller.register_fd(self._duration_timer.fileno(), asyncio.EPOLLIN, self._duration_timeout)
def __del__(self):
self.close()
def fileno(self):
return self._poller.fileno()
poller = property(lambda self: self._poller)
def close(self):
if self._poller is not None:
self._poller.unregister_all()
self._duration_timer.close()
for expire, task in self._expireq:
task.close()
self._poller = None
self._duration_timer = None
self._expireq = []
def add_task(self, callback, period=0.0, delay=0.0, duration=None, callback_args=()):
task = PeriodicTask(callback, period, delay, callback_args)
self._poller.register(task)
if duration:
expire = monotonic() + duration + delay
heappush(self._expireq, (expire, task))
self._duration_timer.settime(self._expireq[0][0], 0.0, absolute=True)
def _duration_timeout(self):
count = self._duration_timer.read()
expire, task = heappop(self._expireq)
task.stop()
self._poller.unregister(task)
task.close()
if self._expireq:
self._duration_timer.settime(self._expireq[0][0], 0.0, absolute=True)
else:
self._duration_timer.settime(0.0, 0.0)
#self._poller.unregister_fd(self._duration_timer.fileno())
def run(self, progress_cb=None):
poller = self._poller
while self._expireq:
poller.poll(5.0)
if progress_cb is not None:
progress_cb(len(self._expireq))
if __name__ == "__main__":
def task1():
print("task1 called")
def task2():
print("task2 called")
def task3():
print("task3 called")
s = Sequencer()
s.add_task(task1, 2.0, duration=10.0)
s.add_task(task2, 2.0, delay=5.0, duration=10.0)
start = monotonic()
s.run()
s.add_task(task3, 2.0, delay=16.0, duration=5.0)
s.run()
s.close()
print("Duration:", monotonic()-start)
| 29.696552 | 104 | 0.627497 |
7946d2b66ed76d196e36b1a6b37ca2f7c96baabe | 2,685 | py | Python | example.py | andrew-ld/async_worker.py | 013b95e8a2f763c62fcc53d11e9c5dc330aec81d | [
"MIT"
] | 1 | 2020-02-06T18:00:04.000Z | 2020-02-06T18:00:04.000Z | example.py | andrew-ld/async_worker.py | 013b95e8a2f763c62fcc53d11e9c5dc330aec81d | [
"MIT"
] | null | null | null | example.py | andrew-ld/async_worker.py | 013b95e8a2f763c62fcc53d11e9c5dc330aec81d | [
"MIT"
] | null | null | null | # This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import asyncio
from multiprocessing import cpu_count
from async_worker import AsyncTaskScheduler, AsyncTask, OneLoopAsyncTask
class Test1(AsyncTask):
_pause: int
def __init__(self, pause: int):
super().__init__()
self._pause = pause
async def process(self) -> int:
print(id(self), self.__class__.__name__, self._pause)
return self._pause
class Test2(AsyncTask):
_after: int
_bootstrapped = False
def __init__(self, after: int):
super().__init__()
self._after = after
async def process(self) -> int:
if not self._bootstrapped:
self._bootstrapped = True
return self._after
for i in range(50):
task = Test3(i)
await self.future(task)
return False
class Test3(OneLoopAsyncTask):
_i: int
async def process(self):
await asyncio.sleep(self._i)
print("sleep", self._i)
async def setup(self):
print("setup triggered")
def __init__(self, i: int):
super().__init__()
self._i = i
async def main():
scheduler = AsyncTaskScheduler()
for i in range(6, 12):
await scheduler.submit(Test1(i))
await scheduler.submit(Test2(6))
await asyncio.gather(*(scheduler.loop() for _ in range(cpu_count())))
if __name__ == "__main__":
_loop = asyncio.get_event_loop()
_loop.run_until_complete(main())
| 29.184783 | 73 | 0.6946 |
7946d2c9f7c2c62f417f9c97900d44f5cf2d7411 | 75,989 | py | Python | tests/python/proton_tests/engine.py | Azure/qpid-proton | fa784b1f3c4f3dbd6b143d5cceda10bf76da23a5 | [
"Apache-2.0"
] | 2 | 2020-04-28T13:33:06.000Z | 2020-06-01T14:51:05.000Z | tests/python/proton_tests/engine.py | Azure/qpid-proton | fa784b1f3c4f3dbd6b143d5cceda10bf76da23a5 | [
"Apache-2.0"
] | null | null | null | tests/python/proton_tests/engine.py | Azure/qpid-proton | fa784b1f3c4f3dbd6b143d5cceda10bf76da23a5 | [
"Apache-2.0"
] | 4 | 2015-10-17T20:44:45.000Z | 2021-06-08T19:00:56.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import os, gc
import sys
from . import common
from time import time, sleep
from proton import *
from .common import pump, Skipped
from proton.reactor import Reactor
from proton._compat import str2bin
# older versions of gc do not provide the garbage list
if not hasattr(gc, "garbage"):
gc.garbage=[]
# future test areas
# + different permutations of setup
# - creating deliveries and calling input/output before opening the session/link
# + shrinking output_size down to something small? should the enginge buffer?
# + resuming
# - locally and remotely created deliveries with the same tag
# Jython 2.5 needs this:
try:
bytes()
except:
bytes = str
# and this...
try:
bytearray()
except:
def bytearray(x):
return str2bin('\x00') * x
OUTPUT_SIZE = 10*1024
class Test(common.Test):
def __init__(self, *args):
common.Test.__init__(self, *args)
self._wires = []
def connection(self):
c1 = Connection()
c2 = Connection()
t1 = Transport()
t1.bind(c1)
t2 = Transport()
t2.bind(c2)
self._wires.append((c1, t1, c2, t2))
mask1 = 0
mask2 = 0
for cat in ("TRACE_FRM", "TRACE_RAW"):
trc = os.environ.get("PN_%s" % cat)
if trc and trc.lower() in ("1", "2", "yes", "true"):
mask1 = mask1 | getattr(Transport, cat)
if trc == "2":
mask2 = mask2 | getattr(Transport, cat)
t1.trace(mask1)
t2.trace(mask2)
return c1, c2
def link(self, name, max_frame=None, idle_timeout=None):
c1, c2 = self.connection()
if max_frame:
c1.transport.max_frame_size = max_frame[0]
c2.transport.max_frame_size = max_frame[1]
if idle_timeout:
# idle_timeout in seconds expressed as float
c1.transport.idle_timeout = idle_timeout[0]
c2.transport.idle_timeout = idle_timeout[1]
c1.open()
c2.open()
ssn1 = c1.session()
ssn1.open()
self.pump()
ssn2 = c2.session_head(Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE)
ssn2.open()
self.pump()
snd = ssn1.sender(name)
rcv = ssn2.receiver(name)
return snd, rcv
def cleanup(self):
self._wires = []
def pump(self, buffer_size=OUTPUT_SIZE):
for c1, t1, c2, t2 in self._wires:
pump(t1, t2, buffer_size)
class ConnectionTest(Test):
def setUp(self):
gc.enable()
self.c1, self.c2 = self.connection()
def cleanup(self):
# release resources created by this class
super(ConnectionTest, self).cleanup()
self.c1 = None
self.c2 = None
def tearDown(self):
self.cleanup()
gc.collect()
assert not gc.garbage
def test_open_close(self):
assert self.c1.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
assert self.c2.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
self.c1.open()
self.pump()
assert self.c1.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
assert self.c2.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE
self.c2.open()
self.pump()
assert self.c1.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.c1.close()
self.pump()
assert self.c1.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
self.c2.close()
self.pump()
assert self.c1.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
assert self.c2.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
def test_simultaneous_open_close(self):
assert self.c1.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
assert self.c2.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
self.c1.open()
self.c2.open()
self.pump()
assert self.c1.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.c1.close()
self.c2.close()
self.pump()
assert self.c1.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
assert self.c2.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
def test_capabilities(self):
self.c1.offered_capabilities = Array(UNDESCRIBED, Data.SYMBOL,
symbol("O_one"),
symbol("O_two"),
symbol("O_three"))
self.c1.desired_capabilities = Array(UNDESCRIBED, Data.SYMBOL,
symbol("D_one"),
symbol("D_two"),
symbol("D_three"))
self.c1.open()
assert self.c2.remote_offered_capabilities is None
assert self.c2.remote_desired_capabilities is None
self.pump()
assert self.c2.remote_offered_capabilities == self.c1.offered_capabilities, \
(self.c2.remote_offered_capabilities, self.c1.offered_capabilities)
assert self.c2.remote_desired_capabilities == self.c1.desired_capabilities, \
(self.c2.remote_desired_capabilities, self.c1.desired_capabilities)
def test_condition(self):
self.c1.open()
self.c2.open()
self.pump()
assert self.c1.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
cond = Condition("blah:bleh", "this is a description", {symbol("foo"): "bar"})
self.c1.condition = cond
self.c1.close()
self.pump()
assert self.c1.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
rcond = self.c2.remote_condition
assert rcond == cond, (rcond, cond)
def test_properties(self, p1={symbol("key"): symbol("value")}, p2=None):
self.c1.properties = p1
self.c2.properties = p2
self.c1.open()
self.c2.open()
self.pump()
assert self.c2.remote_properties == p1, (self.c2.remote_properties, p1)
assert self.c1.remote_properties == p2, (self.c2.remote_properties, p2)
# The proton implementation limits channel_max to 32767.
# If I set the application's limit lower than that, I should
# get my wish. If I set it higher -- not.
def test_channel_max_low(self, value=1234):
self.c1.transport.channel_max = value
self.c1.open()
self.pump()
assert self.c1.transport.channel_max == value, (self.c1.transport.channel_max, value)
def test_channel_max_high(self, value=65535):
self.c1.transport.channel_max = value
self.c1.open()
self.pump()
if "java" in sys.platform:
assert self.c1.transport.channel_max == 65535, (self.c1.transport.channel_max, value)
else:
assert self.c1.transport.channel_max == 32767, (self.c1.transport.channel_max, value)
def test_channel_max_raise_and_lower(self):
if "java" in sys.platform:
upper_limit = 65535
else:
upper_limit = 32767
# It's OK to lower the max below upper_limit.
self.c1.transport.channel_max = 12345
assert self.c1.transport.channel_max == 12345
# But it won't let us raise the limit above PN_IMPL_CHANNEL_MAX.
self.c1.transport.channel_max = 65535
assert self.c1.transport.channel_max == upper_limit
# send the OPEN frame
self.c1.open()
self.pump()
# Now it's too late to make any change, because
# we have already sent the OPEN frame.
try:
self.c1.transport.channel_max = 666
assert False, "expected session exception"
except:
pass
assert self.c1.transport.channel_max == upper_limit
def test_channel_max_limits_sessions(self):
return
# This is an index -- so max number of channels should be 1.
self.c1.transport.channel_max = 0
self.c1.open()
self.c2.open()
ssn_0 = self.c2.session()
assert ssn_0 != None
ssn_0.open()
self.pump()
try:
ssn_1 = self.c2.session()
assert False, "expected session exception"
except SessionException:
pass
def test_cleanup(self):
self.c1.open()
self.c2.open()
self.pump()
assert self.c1.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
t1 = self.c1.transport
t2 = self.c2.transport
c2 = self.c2
self.c1.close()
# release all references to C1, except that held by the transport
self.cleanup()
gc.collect()
# transport should flush last state from C1:
pump(t1, t2)
assert c2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
def test_user_config(self):
if "java" in sys.platform:
raise Skipped("Unsupported API")
self.c1.user = "vindaloo"
self.c1.password = "secret"
self.c1.open()
self.pump()
self.c2.user = "leela"
self.c2.password = "trustno1"
self.c2.open()
self.pump()
assert self.c1.user == "vindaloo", self.c1.user
assert self.c1.password == None, self.c1.password
assert self.c2.user == "leela", self.c2.user
assert self.c2.password == None, self.c2.password
class SessionTest(Test):
def setUp(self):
gc.enable()
self.c1, self.c2 = self.connection()
self.ssn = self.c1.session()
self.c1.open()
self.c2.open()
def cleanup(self):
# release resources created by this class
super(SessionTest, self).cleanup()
self.c1 = None
self.c2 = None
self.ssn = None
def tearDown(self):
self.cleanup()
gc.collect()
assert not gc.garbage
def test_open_close(self):
assert self.ssn.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
self.ssn.open()
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
self.pump()
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
ssn = self.c2.session_head(Endpoint.REMOTE_ACTIVE | Endpoint.LOCAL_UNINIT)
assert ssn != None
assert ssn.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
ssn.open()
assert ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
self.pump()
assert ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
ssn.close()
assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.pump()
assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
self.ssn.close()
assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
self.pump()
assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
assert self.ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
def test_simultaneous_close(self):
self.ssn.open()
self.pump()
ssn = self.c2.session_head(Endpoint.REMOTE_ACTIVE | Endpoint.LOCAL_UNINIT)
assert ssn != None
ssn.open()
self.pump()
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.ssn.close()
ssn.close()
assert self.ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
self.pump()
assert self.ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
assert ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
def test_closing_connection(self):
self.ssn.open()
self.pump()
self.c1.close()
self.pump()
self.ssn.close()
self.pump()
def test_condition(self):
self.ssn.open()
self.pump()
ssn = self.c2.session_head(Endpoint.REMOTE_ACTIVE | Endpoint.LOCAL_UNINIT)
assert ssn != None
ssn.open()
self.pump()
assert self.ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
cond = Condition("blah:bleh", "this is a description", {symbol("foo"): "bar"})
self.ssn.condition = cond
self.ssn.close()
self.pump()
assert self.ssn.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
rcond = ssn.remote_condition
assert rcond == cond, (rcond, cond)
def test_cleanup(self):
snd, rcv = self.link("test-link")
snd.open()
rcv.open()
self.pump()
snd_ssn = snd.session
rcv_ssn = rcv.session
assert rcv_ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.ssn = None
snd_ssn.close()
snd_ssn.free()
del snd_ssn
gc.collect()
self.pump()
assert rcv_ssn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
def test_reopen_on_same_session_without_free(self):
"""
confirm that a link is correctly opened when attaching to a previously
closed link *that has not been freed yet* on the same session
"""
self.ssn.open()
self.pump()
ssn2 = self.c2.session_head(Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE)
ssn2.open()
self.pump()
snd = self.ssn.sender("test-link")
rcv = ssn2.receiver("test-link")
assert snd.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
assert rcv.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
snd.open()
rcv.open()
self.pump()
assert snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
snd.close()
rcv.close()
self.pump()
assert snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
assert rcv.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
snd = self.ssn.sender("test-link")
rcv = ssn2.receiver("test-link")
assert snd.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
assert rcv.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
snd.open()
rcv.open()
self.pump()
assert snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
def test_set_get_outgoing_window(self):
assert self.ssn.outgoing_window == 2147483647
self.ssn.outgoing_window = 1024
assert self.ssn.outgoing_window == 1024
class LinkTest(Test):
def setUp(self):
gc.enable()
self.snd, self.rcv = self.link("test-link")
def cleanup(self):
# release resources created by this class
super(LinkTest, self).cleanup()
self.snd = None
self.rcv = None
def tearDown(self):
self.cleanup()
gc.collect()
assert not gc.garbage, gc.garbage
def test_open_close(self):
assert self.snd.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
assert self.rcv.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
self.snd.open()
assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
assert self.rcv.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
self.pump()
assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
assert self.rcv.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE
self.rcv.open()
assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.pump()
assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.snd.close()
assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.pump()
assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
self.rcv.close()
assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.rcv.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
self.pump()
assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
assert self.rcv.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
def test_simultaneous_open_close(self):
assert self.snd.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
assert self.rcv.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT
self.snd.open()
self.rcv.open()
assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_UNINIT
self.pump()
assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.snd.close()
self.rcv.close()
assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.rcv.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
self.pump()
assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
assert self.rcv.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_CLOSED
def test_multiple(self):
rcv = self.snd.session.receiver("second-rcv")
assert rcv.name == "second-rcv"
self.snd.open()
rcv.open()
self.pump()
c2 = self.rcv.session.connection
l = c2.link_head(Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE)
while l:
l.open()
l = l.next(Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE)
self.pump()
assert self.snd
assert rcv
self.snd.close()
rcv.close()
ssn = rcv.session
conn = ssn.connection
ssn.close()
conn.close()
self.pump()
def test_closing_session(self):
self.snd.open()
self.rcv.open()
ssn1 = self.snd.session
self.pump()
ssn1.close()
self.pump()
self.snd.close()
self.pump()
def test_closing_connection(self):
self.snd.open()
self.rcv.open()
ssn1 = self.snd.session
c1 = ssn1.connection
self.pump()
c1.close()
self.pump()
self.snd.close()
self.pump()
def assertEqualTermini(self, t1, t2):
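# helper: field-by-field comparison of two termini, including their encoded
# properties/capabilities/outcomes/filter sections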
assert t1.type == t2.type, (t1.type, t2.type)
assert t1.address == t2.address, (t1.address, t2.address)
assert t1.durability == t2.durability, (t1.durability, t2.durability)
assert t1.expiry_policy == t2.expiry_policy, (t1.expiry_policy, t2.expiry_policy)
assert t1.timeout == t2.timeout, (t1.timeout, t2.timeout)
assert t1.dynamic == t2.dynamic, (t1.dynamic, t2.dynamic)
for attr in ["properties", "capabilities", "outcomes", "filter"]:
d1 = getattr(t1, attr)
d2 = getattr(t2, attr)
assert d1.format() == d2.format(), (attr, d1.format(), d2.format())
def _test_source_target(self, config_source, config_target):
if config_source is None:
self.snd.source.type = Terminus.UNSPECIFIED
else:
config_source(self.snd.source)
if config_target is None:
self.snd.target.type = Terminus.UNSPECIFIED
else:
config_target(self.snd.target)
self.snd.open()
self.pump()
self.assertEqualTermini(self.rcv.remote_source, self.snd.source)
self.assertEqualTermini(self.rcv.remote_target, self.snd.target)
self.rcv.target.copy(self.rcv.remote_target)
self.rcv.source.copy(self.rcv.remote_source)
self.rcv.open()
self.pump()
self.assertEqualTermini(self.snd.remote_target, self.snd.target)
self.assertEqualTermini(self.snd.remote_source, self.snd.source)
def test_source_target(self):
self._test_source_target(TerminusConfig(address="source"),
TerminusConfig(address="target"))
def test_source(self):
self._test_source_target(TerminusConfig(address="source"), None)
def test_target(self):
self._test_source_target(None, TerminusConfig(address="target"))
def test_coordinator(self):
self._test_source_target(None, TerminusConfig(type=Terminus.COORDINATOR))
def test_source_target_full(self):
self._test_source_target(TerminusConfig(address="source",
timeout=3,
dist_mode=Terminus.DIST_MODE_MOVE,
filter=[("int", 1), ("symbol", "two"), ("string", "three")],
capabilities=["one", "two", "three"]),
TerminusConfig(address="source",
timeout=7,
capabilities=[]))
def test_distribution_mode(self):
self._test_source_target(TerminusConfig(address="source",
dist_mode=Terminus.DIST_MODE_COPY),
TerminusConfig(address="target"))
assert self.rcv.remote_source.distribution_mode == Terminus.DIST_MODE_COPY
assert self.rcv.remote_target.distribution_mode == Terminus.DIST_MODE_UNSPECIFIED
def test_dynamic_link(self):
self._test_source_target(TerminusConfig(address=None, dynamic=True), None)
assert self.rcv.remote_source.dynamic
assert self.rcv.remote_source.address is None
def test_condition(self):
self.snd.open()
self.rcv.open()
self.pump()
assert self.snd.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
cond = Condition("blah:bleh", "this is a description", {symbol("foo"): "bar"})
self.snd.condition = cond
self.snd.close()
self.pump()
assert self.snd.state == Endpoint.LOCAL_CLOSED | Endpoint.REMOTE_ACTIVE
assert self.rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
rcond = self.rcv.remote_condition
assert rcond == cond, (rcond, cond)
def test_settle_mode(self):
self.snd.snd_settle_mode = Link.SND_UNSETTLED
assert self.snd.snd_settle_mode == Link.SND_UNSETTLED
self.rcv.rcv_settle_mode = Link.RCV_SECOND
assert self.rcv.rcv_settle_mode == Link.RCV_SECOND
assert self.snd.remote_rcv_settle_mode != Link.RCV_SECOND
assert self.rcv.remote_snd_settle_mode != Link.SND_UNSETTLED
self.snd.open()
self.rcv.open()
self.pump()
assert self.snd.remote_rcv_settle_mode == Link.RCV_SECOND
assert self.rcv.remote_snd_settle_mode == Link.SND_UNSETTLED
def test_cleanup(self):
snd, rcv = self.link("test-link")
snd.open()
rcv.open()
self.pump()
assert rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
snd.close()
snd.free()
del snd
gc.collect()
self.pump()
assert rcv.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
class TerminusConfig:
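# Callable test helper: stores terminus settings and, when invoked with a
# Terminus, applies only the fields that were explicitly configured.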
def __init__(self, type=None, address=None, timeout=None, durability=None,
filter=None, capabilities=None, dynamic=False, dist_mode=None):
self.address = address
self.timeout = timeout
self.durability = durability
self.filter = filter
self.capabilities = capabilities
self.dynamic = dynamic
self.dist_mode = dist_mode
self.type = type
def __call__(self, terminus):
if self.type is not None:
terminus.type = self.type
if self.address is not None:
terminus.address = self.address
if self.timeout is not None:
terminus.timeout = self.timeout
if self.durability is not None:
terminus.durability = self.durability
if self.capabilities is not None:
terminus.capabilities.put_array(False, Data.SYMBOL)
terminus.capabilities.enter()
for c in self.capabilities:
terminus.capabilities.put_symbol(c)
if self.filter is not None:
terminus.filter.put_map()
terminus.filter.enter()
for (t, v) in self.filter:
setter = getattr(terminus.filter, "put_%s" % t)
setter(v)
if self.dynamic:
terminus.dynamic = True
if self.dist_mode is not None:
terminus.distribution_mode = self.dist_mode
class TransferTest(Test):
def setUp(self):
gc.enable()
self.snd, self.rcv = self.link("test-link")
self.c1 = self.snd.session.connection
self.c2 = self.rcv.session.connection
self.snd.open()
self.rcv.open()
self.pump()
def cleanup(self):
# release resources created by this class
super(TransferTest, self).cleanup()
self.c1 = None
self.c2 = None
self.snd = None
self.rcv = None
def tearDown(self):
self.cleanup()
gc.collect()
assert not gc.garbage
def test_work_queue(self):
assert self.c1.work_head is None
self.snd.delivery("tag")
assert self.c1.work_head is None
self.rcv.flow(1)
self.pump()
d = self.c1.work_head
assert d is not None
tag = d.tag
assert tag == "tag", tag
assert d.writable
n = self.snd.send(str2bin("this is a test"))
assert self.snd.advance()
assert self.c1.work_head is None
self.pump()
d = self.c2.work_head
assert d.tag == "tag"
assert d.readable
def test_multiframe(self):
self.rcv.flow(1)
self.snd.delivery("tag")
msg = str2bin("this is a test")
n = self.snd.send(msg)
assert n == len(msg)
self.pump()
d = self.rcv.current
assert d
assert d.tag == "tag", repr(d.tag)
assert d.readable
binary = self.rcv.recv(1024)
assert binary == msg, (binary, msg)
binary = self.rcv.recv(1024)
assert binary == str2bin("")
msg = str2bin("this is more")
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.pump()
binary = self.rcv.recv(1024)
assert binary == msg, (binary, msg)
binary = self.rcv.recv(1024)
assert binary is None
def test_disposition(self):
self.rcv.flow(1)
self.pump()
sd = self.snd.delivery("tag")
msg = str2bin("this is a test")
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.pump()
rd = self.rcv.current
assert rd is not None
assert rd.tag == sd.tag
rmsg = self.rcv.recv(1024)
assert rmsg == msg
rd.update(Delivery.ACCEPTED)
self.pump()
rdisp = sd.remote_state
ldisp = rd.local_state
assert rdisp == ldisp == Delivery.ACCEPTED, (rdisp, ldisp)
assert sd.updated
sd.update(Delivery.ACCEPTED)
self.pump()
assert sd.local_state == rd.remote_state == Delivery.ACCEPTED
sd.settle()
def test_delivery_id_ordering(self):
self.rcv.flow(1024)
self.pump(buffer_size=64*1024)
# fill up delivery buffer on sender
for m in range(1024):
sd = self.snd.delivery("tag%s" % m)
msg = ("message %s" % m).encode('ascii')
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.pump(buffer_size=64*1024)
# receive a session window's worth of messages and accept them
for m in range(1024):
rd = self.rcv.current
assert rd is not None, m
assert rd.tag == ("tag%s" % m), (rd.tag, m)
msg = self.rcv.recv(1024)
assert msg == ("message %s" % m).encode('ascii'), (msg, m)
rd.update(Delivery.ACCEPTED)
rd.settle()
self.pump(buffer_size=64*1024)
# add some new deliveries
for m in range(1024, 1450):
sd = self.snd.delivery("tag%s" % m)
msg = ("message %s" % m).encode('ascii')
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
# handle all disposition changes to sent messages
d = self.c1.work_head
while d:
next_d = d.work_next
if d.updated:
d.update(Delivery.ACCEPTED)
d.settle()
d = next_d
# submit some more deliveries
for m in range(1450, 1500):
sd = self.snd.delivery("tag%s" % m)
msg = ("message %s" % m).encode('ascii')
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.pump(buffer_size=64*1024)
self.rcv.flow(1024)
self.pump(buffer_size=64*1024)
# verify remaining messages can be received and accepted
for m in range(1024, 1500):
rd = self.rcv.current
assert rd is not None, m
assert rd.tag == ("tag%s" % m), (rd.tag, m)
msg = self.rcv.recv(1024)
assert msg == ("message %s" % m).encode('ascii'), (msg, m)
rd.update(Delivery.ACCEPTED)
rd.settle()
def test_cleanup(self):
self.rcv.flow(10)
self.pump()
for x in range(10):
self.snd.delivery("tag%d" % x)
msg = str2bin("this is a test")
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.snd.close()
self.snd.free()
self.snd = None
gc.collect()
self.pump()
for x in range(10):
rd = self.rcv.current
assert rd is not None
assert rd.tag == "tag%d" % x
rmsg = self.rcv.recv(1024)
assert self.rcv.advance()
assert rmsg == msg
# close of snd should've settled:
assert rd.settled
rd.settle()
class MaxFrameTransferTest(Test):
def setUp(self):
pass
def cleanup(self):
# release resources created by this class
super(MaxFrameTransferTest, self).cleanup()
self.c1 = None
self.c2 = None
self.snd = None
self.rcv = None
def tearDown(self):
self.cleanup()
def message(self, size):
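# build a payload of exactly 'size' bytes out of "/"-joined counters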
parts = []
for i in range(size):
parts.append(str(i))
return "/".join(parts)[:size].encode("utf-8")
def testMinFrame(self):
"""
Configure receiver to support minimum max-frame as defined by AMQP-1.0.
Verify transfer of messages larger than 512.
"""
self.snd, self.rcv = self.link("test-link", max_frame=[0,512])
self.c1 = self.snd.session.connection
self.c2 = self.rcv.session.connection
self.snd.open()
self.rcv.open()
self.pump()
assert self.rcv.session.connection.transport.max_frame_size == 512
assert self.snd.session.connection.transport.remote_max_frame_size == 512
self.rcv.flow(1)
self.snd.delivery("tag")
msg = self.message(513)
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.pump()
binary = self.rcv.recv(513)
assert binary == msg
binary = self.rcv.recv(1024)
assert binary == None
def testOddFrame(self):
"""
Test an odd-sized max frame limit with data that requires multiple frames to
be transferred.
"""
self.snd, self.rcv = self.link("test-link", max_frame=[0,521])
self.c1 = self.snd.session.connection
self.c2 = self.rcv.session.connection
self.snd.open()
self.rcv.open()
self.pump()
assert self.rcv.session.connection.transport.max_frame_size == 521
assert self.snd.session.connection.transport.remote_max_frame_size == 521
self.rcv.flow(2)
self.snd.delivery("tag")
msg = ("X" * 1699).encode('utf-8')
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.pump()
binary = self.rcv.recv(1699)
assert binary == msg
binary = self.rcv.recv(1024)
assert binary == None
self.rcv.advance()
self.snd.delivery("gat")
msg = self.message(1426)
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.pump()
binary = self.rcv.recv(1426)
assert binary == msg
self.pump()
binary = self.rcv.recv(1024)
assert binary == None
def testBigMessage(self):
"""
Test transferring a big message.
"""
self.snd, self.rcv = self.link("test-link")
self.c1 = self.snd.session.connection
self.c2 = self.rcv.session.connection
self.snd.open()
self.rcv.open()
self.pump()
self.rcv.flow(2)
self.snd.delivery("tag")
msg = self.message(1024*256)
n = self.snd.send(msg)
assert n == len(msg)
assert self.snd.advance()
self.pump()
binary = self.rcv.recv(1024*256)
assert binary == msg
binary = self.rcv.recv(1024)
assert binary == None
class IdleTimeoutTest(Test):
def setUp(self):
pass
def cleanup(self):
# release resources created by this class
super(IdleTimeoutTest, self).cleanup()
self.snd = None
self.rcv = None
self.c1 = None
self.c2 = None
def tearDown(self):
self.cleanup()
def message(self, size):
parts = []
for i in range(size):
parts.append(str(i))
return "/".join(parts)[:size]
def testGetSet(self):
"""
Verify the configuration and negotiation of the idle timeout.
"""
self.snd, self.rcv = self.link("test-link", idle_timeout=[1.0,2.0])
self.c1 = self.snd.session.connection
self.c2 = self.rcv.session.connection
self.snd.open()
self.rcv.open()
self.pump()
# proton advertises 1/2 the configured timeout to the peer:
assert self.rcv.session.connection.transport.idle_timeout == 2.0
assert self.rcv.session.connection.transport.remote_idle_timeout == 0.5
assert self.snd.session.connection.transport.idle_timeout == 1.0
assert self.snd.session.connection.transport.remote_idle_timeout == 1.0
def testTimeout(self):
"""
Verify the AMQP Connection idle timeout.
"""
# snd will timeout the Connection if no frame is received within its 1.0 second idle timeout
self.snd, self.rcv = self.link("test-link", idle_timeout=[1.0,0])
self.c1 = self.snd.session.connection
self.c2 = self.rcv.session.connection
self.snd.open()
self.rcv.open()
self.pump()
t_snd = self.snd.session.connection.transport
t_rcv = self.rcv.session.connection.transport
assert t_rcv.idle_timeout == 0.0
# proton advertises 1/2 the timeout (see spec)
assert t_rcv.remote_idle_timeout == 0.5
assert t_snd.idle_timeout == 1.0
assert t_snd.remote_idle_timeout == 0.0
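# record baseline frame counts so later ticks can detect whether keepalive frames were exchanged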
sndr_frames_in = t_snd.frames_input
rcvr_frames_out = t_rcv.frames_output
# at t+1msec, nothing should happen:
clock = 0.001
assert t_snd.tick(clock) == 1.001, "deadline for remote timeout"
assert t_rcv.tick(clock) == 0.251, "deadline to send keepalive"
self.pump()
assert sndr_frames_in == t_snd.frames_input, "unexpected received frame"
# at one tick from expected idle frame send, nothing should happen:
clock = 0.250
assert t_snd.tick(clock) == 1.001, "deadline for remote timeout"
assert t_rcv.tick(clock) == 0.251, "deadline to send keepalive"
self.pump()
assert sndr_frames_in == t_snd.frames_input, "unexpected received frame"
# this should cause rcvr to expire and send a keepalive
clock = 0.251
assert t_snd.tick(clock) == 1.001, "deadline for remote timeout"
assert t_rcv.tick(clock) == 0.501, "deadline to send keepalive"
self.pump()
sndr_frames_in += 1
rcvr_frames_out += 1
assert sndr_frames_in == t_snd.frames_input, "unexpected received frame"
assert rcvr_frames_out == t_rcv.frames_output, "unexpected frame"
# since a keepalive was received, sndr will rebase its clock against this tick,
# and the receiver should not change its deadline
clock = 0.498
assert t_snd.tick(clock) == 1.498, "deadline for remote timeout"
assert t_rcv.tick(clock) == 0.501, "deadline to send keepalive"
self.pump()
assert sndr_frames_in == t_snd.frames_input, "unexpected received frame"
# now expire sndr
clock = 1.499
t_snd.tick(clock)
self.pump()
assert self.c2.state & Endpoint.REMOTE_CLOSED
assert self.c2.remote_condition.name == "amqp:resource-limit-exceeded"
class CreditTest(Test):
def setUp(self):
self.snd, self.rcv = self.link("test-link", max_frame=(16*1024, 16*1024))
self.c1 = self.snd.session.connection
self.c2 = self.rcv.session.connection
self.snd.open()
self.rcv.open()
self.pump()
def cleanup(self):
# release resources created by this class
super(CreditTest, self).cleanup()
self.c1 = None
self.snd = None
self.c2 = None
self.rcv2 = None
self.snd2 = None
def tearDown(self):
self.cleanup()
def testCreditSender(self, count=1024):
credit = self.snd.credit
assert credit == 0, credit
self.rcv.flow(10)
self.pump()
credit = self.snd.credit
assert credit == 10, credit
self.rcv.flow(count)
self.pump()
credit = self.snd.credit
assert credit == 10 + count, credit
def testCreditReceiver(self):
self.rcv.flow(10)
self.pump()
assert self.rcv.credit == 10, self.rcv.credit
d = self.snd.delivery("tag")
assert d
assert self.snd.advance()
self.pump()
assert self.rcv.credit == 10, self.rcv.credit
assert self.rcv.queued == 1, self.rcv.queued
c = self.rcv.current
assert c.tag == "tag", c.tag
assert self.rcv.advance()
assert self.rcv.credit == 9, self.rcv.credit
assert self.rcv.queued == 0, self.rcv.queued
def _testBufferingOnClose(self, a, b):
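# queue 10 pre-settled deliveries, close endpoint 'b' from the remote side and
# endpoint 'a' locally, and verify the deliveries stay buffered on the sender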
for i in range(10):
d = self.snd.delivery("tag-%s" % i)
assert d
d.settle()
self.pump()
assert self.snd.queued == 10
endpoints = {"connection": (self.c1, self.c2),
"session": (self.snd.session, self.rcv.session),
"link": (self.snd, self.rcv)}
local_a, remote_a = endpoints[a]
local_b, remote_b = endpoints[b]
remote_b.close()
self.pump()
assert local_b.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED
local_a.close()
self.pump()
assert remote_a.state & Endpoint.REMOTE_CLOSED
assert self.snd.queued == 10
def testBufferingOnCloseLinkLink(self):
self._testBufferingOnClose("link", "link")
def testBufferingOnCloseLinkSession(self):
self._testBufferingOnClose("link", "session")
def testBufferingOnCloseLinkConnection(self):
self._testBufferingOnClose("link", "connection")
def testBufferingOnCloseSessionLink(self):
self._testBufferingOnClose("session", "link")
def testBufferingOnCloseSessionSession(self):
self._testBufferingOnClose("session", "session")
def testBufferingOnCloseSessionConnection(self):
self._testBufferingOnClose("session", "connection")
def testBufferingOnCloseConnectionLink(self):
self._testBufferingOnClose("connection", "link")
def testBufferingOnCloseConnectionSession(self):
self._testBufferingOnClose("connection", "session")
def testBufferingOnCloseConnectionConnection(self):
self._testBufferingOnClose("connection", "connection")
def testFullDrain(self):
assert self.rcv.credit == 0
assert self.snd.credit == 0
self.rcv.drain(10)
assert self.rcv.draining()
assert self.rcv.credit == 10
assert self.snd.credit == 0
self.pump()
assert self.rcv.credit == 10
assert self.snd.credit == 10
assert self.rcv.draining()
self.snd.drained()
assert self.rcv.credit == 10
assert self.snd.credit == 0
assert self.rcv.draining()
self.pump()
assert self.rcv.credit == 0
assert self.snd.credit == 0
assert not self.rcv.draining()
drained = self.rcv.drained()
assert drained == 10, drained
def testPartialDrain(self):
self.rcv.drain(2)
assert self.rcv.draining()
self.pump()
d = self.snd.delivery("tag")
assert d
assert self.snd.advance()
self.snd.drained()
assert self.rcv.draining()
self.pump()
assert not self.rcv.draining()
c = self.rcv.current
assert self.rcv.queued == 1, self.rcv.queued
assert c.tag == d.tag, c.tag
assert self.rcv.advance()
assert not self.rcv.current
assert self.rcv.credit == 0, self.rcv.credit
assert not self.rcv.draining()
drained = self.rcv.drained()
assert drained == 1, drained
def testDrainFlow(self):
assert self.rcv.credit == 0
assert self.snd.credit == 0
self.rcv.drain(10)
assert self.rcv.credit == 10
assert self.snd.credit == 0
self.pump()
assert self.rcv.credit == 10
assert self.snd.credit == 10
self.snd.drained()
assert self.rcv.credit == 10
assert self.snd.credit == 0
self.pump()
assert self.rcv.credit == 0
assert self.snd.credit == 0
self.rcv.flow(10)
assert self.rcv.credit == 10
assert self.snd.credit == 0
self.pump()
assert self.rcv.credit == 10
assert self.snd.credit == 10
self.snd.drained()
assert self.rcv.credit == 10
assert self.snd.credit == 10
self.pump()
assert self.rcv.credit == 10
assert self.snd.credit == 10
drained = self.rcv.drained()
assert drained == 10, drained
def testNegative(self):
assert self.snd.credit == 0
d = self.snd.delivery("tag")
assert d
assert self.snd.advance()
self.pump()
assert self.rcv.credit == 0
assert self.rcv.queued == 0
self.rcv.flow(1)
assert self.rcv.credit == 1
assert self.rcv.queued == 0
self.pump()
assert self.rcv.credit == 1
assert self.rcv.queued == 1, self.rcv.queued
c = self.rcv.current
assert c
assert c.tag == "tag"
assert self.rcv.advance()
assert self.rcv.credit == 0
assert self.rcv.queued == 0
def testDrainZero(self):
assert self.snd.credit == 0
assert self.rcv.credit == 0
assert self.rcv.queued == 0
drained = self.rcv.drained()
assert drained == 0
self.rcv.flow(10)
self.pump()
assert self.snd.credit == 10
assert self.rcv.credit == 10
assert self.rcv.queued == 0
self.snd.drained()
self.pump()
assert self.snd.credit == 10
assert self.rcv.credit == 10
assert self.rcv.queued == 0
drained = self.rcv.drained()
assert drained == 0
self.rcv.drain(0)
assert self.snd.credit == 10
assert self.rcv.credit == 10
assert self.rcv.queued == 0
self.pump()
assert self.snd.credit == 10
assert self.rcv.credit == 10
assert self.rcv.queued == 0
self.snd.drained()
assert self.snd.credit == 0
assert self.rcv.credit == 10
assert self.rcv.queued == 0
drained = self.rcv.drained()
assert drained == 0
self.pump()
assert self.snd.credit == 0
assert self.rcv.credit == 0
assert self.rcv.queued == 0
drained = self.rcv.drained()
assert drained == 10
def testDrainOrder(self):
""" Verify drain/drained works regardless of ordering. See PROTON-401
"""
assert self.snd.credit == 0
assert self.rcv.credit == 0
assert self.rcv.queued == 0
#self.rcv.session.connection.transport.trace(Transport.TRACE_FRM)
#self.snd.session.connection.transport.trace(Transport.TRACE_FRM)
## verify that a sender that has reached the drain state will respond
## promptly to a drain issued by the peer.
self.rcv.flow(10)
self.pump()
assert self.snd.credit == 10, self.snd.credit
assert self.rcv.credit == 10, self.rcv.credit
sd = self.snd.delivery("tagA")
assert sd
n = self.snd.send(str2bin("A"))
assert n == 1
self.pump()
self.snd.advance()
# done sending, so signal that we are drained:
self.snd.drained()
self.pump()
assert self.snd.credit == 9, self.snd.credit
assert self.rcv.credit == 10, self.rcv.credit
self.rcv.drain(0)
self.pump()
assert self.snd.credit == 9, self.snd.credit
assert self.rcv.credit == 10, self.rcv.credit
data = self.rcv.recv(10)
assert data == str2bin("A"), data
self.rcv.advance()
self.pump()
assert self.snd.credit == 9, self.snd.credit
assert self.rcv.credit == 9, self.rcv.credit
self.snd.drained()
self.pump()
assert self.snd.credit == 0, self.snd.credit
assert self.rcv.credit == 0, self.rcv.credit
# verify that a drain requested by the peer is not "acknowledged" until
# after the sender has completed sending its pending messages
self.rcv.flow(10)
self.pump()
assert self.snd.credit == 10, self.snd.credit
assert self.rcv.credit == 10, self.rcv.credit
sd = self.snd.delivery("tagB")
assert sd
n = self.snd.send(str2bin("B"))
assert n == 1
self.snd.advance()
self.pump()
assert self.snd.credit == 9, self.snd.credit
assert self.rcv.credit == 10, self.rcv.credit
self.rcv.drain(0)
self.pump()
assert self.snd.credit == 9, self.snd.credit
assert self.rcv.credit == 10, self.rcv.credit
sd = self.snd.delivery("tagC")
assert sd
n = self.snd.send(str2bin("C"))
assert n == 1
self.snd.advance()
self.pump()
assert self.snd.credit == 8, self.snd.credit
assert self.rcv.credit == 10, self.rcv.credit
# now that the sender has finished sending everything, it can signal
# drained
self.snd.drained()
self.pump()
assert self.snd.credit == 0, self.snd.credit
assert self.rcv.credit == 2, self.rcv.credit
data = self.rcv.recv(10)
assert data == str2bin("B"), data
self.rcv.advance()
data = self.rcv.recv(10)
assert data == str2bin("C"), data
self.rcv.advance()
self.pump()
assert self.snd.credit == 0, self.snd.credit
assert self.rcv.credit == 0, self.rcv.credit
def testPushback(self, count=10):
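# once all granted credit is consumed, an extra delivery stays queued on the
# sender even after pumping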
assert self.snd.credit == 0
assert self.rcv.credit == 0
self.rcv.flow(count)
self.pump()
for i in range(count):
d = self.snd.delivery("tag%s" % i)
assert d
self.snd.advance()
assert self.snd.queued == count
assert self.rcv.queued == 0
self.pump()
assert self.snd.queued == 0
assert self.rcv.queued == count
d = self.snd.delivery("extra")
self.snd.advance()
assert self.snd.queued == 1
assert self.rcv.queued == count
self.pump()
assert self.snd.queued == 1
assert self.rcv.queued == count
def testHeadOfLineBlocking(self):
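# two links share one session with different credit; traffic on the second link
# must not be blocked behind the first link once the first link's credit runs out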
self.snd2 = self.snd.session.sender("link-2")
self.rcv2 = self.rcv.session.receiver("link-2")
self.snd2.open()
self.rcv2.open()
self.pump()
assert self.snd2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
assert self.rcv2.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE
self.rcv.flow(5)
self.rcv2.flow(10)
self.pump()
assert self.snd.credit == 5
assert self.snd2.credit == 10
for i in range(10):
tag = "test %d" % i
self.snd.delivery( tag )
self.snd.send( tag.encode("ascii") )
assert self.snd.advance()
self.snd2.delivery( tag )
self.snd2.send( tag.encode("ascii") )
assert self.snd2.advance()
self.pump()
for i in range(5):
b = self.rcv.recv( 512 )
assert self.rcv.advance()
b = self.rcv2.recv( 512 )
assert self.rcv2.advance()
for i in range(5):
b = self.rcv2.recv( 512 )
assert self.rcv2.advance()
class SessionCreditTest(Test):
def tearDown(self):
self.cleanup()
def testBuffering(self, count=32, size=1024, capacity=16*1024, max_frame=1024):
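# with a bounded incoming capacity, bytes not yet admitted by the receiver's
# session remain buffered in the sender's outgoing window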
snd, rcv = self.link("test-link", max_frame=(max_frame, max_frame))
rcv.session.incoming_capacity = capacity
snd.open()
rcv.open()
rcv.flow(count)
self.pump()
assert count > 0
total_bytes = count * size
assert snd.session.outgoing_bytes == 0, snd.session.outgoing_bytes
assert rcv.session.incoming_bytes == 0, rcv.session.incoming_bytes
assert snd.queued == 0, snd.queued
assert rcv.queued == 0, rcv.queued
data = bytes(bytearray(size))
idx = 0
while snd.credit:
d = snd.delivery("tag%s" % idx)
assert d
n = snd.send(data)
assert n == size, (n, size)
assert snd.advance()
self.pump()
idx += 1
assert idx == count, (idx, count)
assert snd.session.outgoing_bytes < total_bytes, (snd.session.outgoing_bytes, total_bytes)
assert rcv.session.incoming_bytes < capacity, (rcv.session.incoming_bytes, capacity)
assert snd.session.outgoing_bytes + rcv.session.incoming_bytes == total_bytes, \
(snd.session.outgoing_bytes, rcv.session.incoming_bytes, total_bytes)
if snd.session.outgoing_bytes > 0:
available = rcv.session.incoming_capacity - rcv.session.incoming_bytes
assert available < max_frame, (available, max_frame)
for i in range(count):
d = rcv.current
assert d, i
pending = d.pending
before = rcv.session.incoming_bytes
assert rcv.advance()
after = rcv.session.incoming_bytes
assert before - after == pending, (before, after, pending)
snd_before = snd.session.outgoing_bytes
self.pump()
snd_after = snd.session.outgoing_bytes
assert rcv.session.incoming_bytes < capacity
if snd_before > 0:
assert capacity - after <= max_frame
assert snd_before > snd_after
if snd_after > 0:
available = rcv.session.incoming_capacity - rcv.session.incoming_bytes
assert available < max_frame, available
def testBufferingSize16(self):
self.testBuffering(size=16)
def testBufferingSize256(self):
self.testBuffering(size=256)
def testBufferingSize512(self):
self.testBuffering(size=512)
def testBufferingSize2048(self):
self.testBuffering(size=2048)
def testBufferingSize1025(self):
self.testBuffering(size=1025)
def testBufferingSize1023(self):
self.testBuffering(size=1023)
def testBufferingSize989(self):
self.testBuffering(size=989)
def testBufferingSize1059(self):
self.testBuffering(size=1059)
def testCreditWithBuffering(self):
snd, rcv = self.link("test-link", max_frame=(1024, 1024))
rcv.session.incoming_capacity = 64*1024
snd.open()
rcv.open()
rcv.flow(128)
self.pump()
assert snd.credit == 128, snd.credit
assert rcv.queued == 0, rcv.queued
idx = 0
while snd.credit:
d = snd.delivery("tag%s" % idx)
snd.send(("x"*1024).encode('ascii'))
assert d
assert snd.advance()
self.pump()
idx += 1
assert idx == 128, idx
assert rcv.queued < 128, rcv.queued
rcv.flow(1)
self.pump()
assert snd.credit == 1, snd.credit
class SettlementTest(Test):
def setUp(self):
self.snd, self.rcv = self.link("test-link")
self.c1 = self.snd.session.connection
self.c2 = self.rcv.session.connection
self.snd.open()
self.rcv.open()
self.pump()
def cleanup(self):
# release resources created by this class
super(SettlementTest, self).cleanup()
self.c1 = None
self.snd = None
self.c2 = None
self.rcv2 = None
self.snd2 = None
def tearDown(self):
self.cleanup()
def testSettleCurrent(self):
self.rcv.flow(10)
self.pump()
assert self.snd.credit == 10, self.snd.credit
d = self.snd.delivery("tag")
e = self.snd.delivery("tag2")
assert d
assert e
c = self.snd.current
assert c.tag == "tag", c.tag
c.settle()
c = self.snd.current
assert c.tag == "tag2", c.tag
c.settle()
c = self.snd.current
assert not c
self.pump()
c = self.rcv.current
assert c
assert c.tag == "tag", c.tag
assert c.settled
c.settle()
c = self.rcv.current
assert c
assert c.tag == "tag2", c.tag
assert c.settled
c.settle()
c = self.rcv.current
assert not c
def testUnsettled(self):
self.rcv.flow(10)
self.pump()
assert self.snd.unsettled == 0, self.snd.unsettled
assert self.rcv.unsettled == 0, self.rcv.unsettled
d = self.snd.delivery("tag")
assert d
assert self.snd.unsettled == 1, self.snd.unsettled
assert self.rcv.unsettled == 0, self.rcv.unsettled
assert self.snd.advance()
self.pump()
assert self.snd.unsettled == 1, self.snd.unsettled
assert self.rcv.unsettled == 1, self.rcv.unsettled
c = self.rcv.current
assert c
c.settle()
assert self.snd.unsettled == 1, self.snd.unsettled
assert self.rcv.unsettled == 0, self.rcv.unsettled
def testMultipleUnsettled(self, count=1024, size=1024):
self.rcv.flow(count)
self.pump()
assert self.snd.unsettled == 0, self.snd.unsettled
assert self.rcv.unsettled == 0, self.rcv.unsettled
unsettled = []
for i in range(count):
sd = self.snd.delivery("tag%s" % i)
assert sd
n = self.snd.send(("x"*size).encode('ascii'))
assert n == size, n
assert self.snd.advance()
self.pump()
rd = self.rcv.current
assert rd, "did not receive delivery %s" % i
n = rd.pending
b = self.rcv.recv(n)
assert len(b) == n, (b, n)
rd.update(Delivery.ACCEPTED)
assert self.rcv.advance()
self.pump()
unsettled.append(rd)
assert self.rcv.unsettled == count
for rd in unsettled:
rd.settle()
def testMultipleUnsettled2K1K(self):
self.testMultipleUnsettled(2048, 1024)
def testMultipleUnsettled4K1K(self):
self.testMultipleUnsettled(4096, 1024)
def testMultipleUnsettled1K2K(self):
self.testMultipleUnsettled(1024, 2048)
def testMultipleUnsettled2K2K(self):
self.testMultipleUnsettled(2048, 2048)
def testMultipleUnsettled4K2K(self):
self.testMultipleUnsettled(4096, 2048)
class PipelineTest(Test):
def setUp(self):
self.c1, self.c2 = self.connection()
def cleanup(self):
# release resources created by this class
super(PipelineTest, self).cleanup()
self.c1 = None
self.c2 = None
def tearDown(self):
self.cleanup()
def test(self):
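# pipeline the whole open/transfer/close sequence before the peer reacts, then
# verify the peer can still open and receive all ten deliveries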
ssn = self.c1.session()
snd = ssn.sender("sender")
self.c1.open()
ssn.open()
snd.open()
for i in range(10):
d = snd.delivery("delivery-%s" % i)
snd.send(str2bin("delivery-%s" % i))
d.settle()
snd.close()
ssn.close()
self.c1.close()
self.pump()
state = self.c2.state
assert state == (Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE), "%x" % state
ssn2 = self.c2.session_head(Endpoint.LOCAL_UNINIT)
assert ssn2
state = ssn2.state
assert state == (Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE), "%x" % state
rcv = self.c2.link_head(Endpoint.LOCAL_UNINIT)
assert rcv
state = rcv.state
assert state == (Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_ACTIVE), "%x" % state
self.c2.open()
ssn2.open()
rcv.open()
rcv.flow(10)
assert rcv.queued == 0, rcv.queued
self.pump()
assert rcv.queued == 10, rcv.queued
state = rcv.state
assert state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED), "%x" % state
state = ssn2.state
assert state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED), "%x" % state
state = self.c2.state
assert state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED), "%x" % state
for i in range(rcv.queued):
d = rcv.current
assert d
assert d.tag == "delivery-%s" % i
d.settle()
assert rcv.queued == 0, rcv.queued
class ServerTest(Test):
def testKeepalive(self):
""" Verify that idle frames are sent to keep a Connection alive
"""
if "java" in sys.platform:
raise Skipped()
idle_timeout = self.delay
server = common.TestServer()
server.start()
class Program:
def on_reactor_init(self, event):
self.conn = event.reactor.connection()
self.conn.hostname = "%s:%s" % (server.host, server.port)
self.conn.open()
self.old_count = None
event.reactor.schedule(3 * idle_timeout, self)
def on_connection_bound(self, event):
event.transport.idle_timeout = idle_timeout
def on_connection_remote_open(self, event):
self.old_count = event.transport.frames_input
def on_timer_task(self, event):
assert self.conn.state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE), "Connection terminated"
assert self.conn.transport.frames_input > self.old_count, "No idle frames received"
self.conn.close()
Reactor(Program()).run()
server.stop()
def testIdleTimeout(self):
""" Verify that a Connection is terminated properly when Idle frames do not
arrive in a timely manner.
"""
if "java" in sys.platform:
raise Skipped()
idle_timeout = self.delay
server = common.TestServer(idle_timeout=idle_timeout)
server.start()
class Program:
def on_reactor_init(self, event):
self.conn = event.reactor.connection()
self.conn.hostname = "%s:%s" % (server.host, server.port)
self.conn.open()
self.remote_condition = None
self.old_count = None
# verify the connection stays up even if we don't explicitly send stuff
# wait up to 3x the idle timeout
event.reactor.schedule(3 * idle_timeout, self)
def on_connection_bound(self, event):
self.transport = event.transport
def on_connection_remote_open(self, event):
self.old_count = event.transport.frames_output
def on_connection_remote_close(self, event):
assert self.conn.remote_condition
assert self.conn.remote_condition.name == "amqp:resource-limit-exceeded"
self.remote_condition = self.conn.remote_condition
def on_timer_task(self, event):
assert self.conn.state == (Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_ACTIVE), "Connection terminated"
assert self.conn.transport.frames_output > self.old_count, "No idle frames sent"
# now wait to explicitly cause the other side to expire:
sleep(3 * idle_timeout)
p = Program()
Reactor(p).run()
assert p.remote_condition
assert p.remote_condition.name == "amqp:resource-limit-exceeded"
server.stop()
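# Disposition "value" helpers: apply() sets outcome-specific fields on the local
# disposition, check() verifies them on the peer's remote view.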
class NoValue:
def __init__(self):
pass
def apply(self, dlv):
pass
def check(self, dlv):
assert dlv.data == None
assert dlv.section_number == 0
assert dlv.section_offset == 0
assert dlv.condition == None
assert dlv.failed == False
assert dlv.undeliverable == False
assert dlv.annotations == None
class RejectValue:
def __init__(self, condition):
self.condition = condition
def apply(self, dlv):
dlv.condition = self.condition
def check(self, dlv):
assert dlv.data == None, dlv.data
assert dlv.section_number == 0
assert dlv.section_offset == 0
assert dlv.condition == self.condition, (dlv.condition, self.condition)
assert dlv.failed == False
assert dlv.undeliverable == False
assert dlv.annotations == None
class ReceivedValue:
def __init__(self, section_number, section_offset):
self.section_number = section_number
self.section_offset = section_offset
def apply(self, dlv):
dlv.section_number = self.section_number
dlv.section_offset = self.section_offset
def check(self, dlv):
assert dlv.data == None, dlv.data
assert dlv.section_number == self.section_number, (dlv.section_number, self.section_number)
assert dlv.section_offset == self.section_offset
assert dlv.condition == None
assert dlv.failed == False
assert dlv.undeliverable == False
assert dlv.annotations == None
class ModifiedValue:
def __init__(self, failed, undeliverable, annotations):
self.failed = failed
self.undeliverable = undeliverable
self.annotations = annotations
def apply(self, dlv):
dlv.failed = self.failed
dlv.undeliverable = self.undeliverable
dlv.annotations = self.annotations
def check(self, dlv):
assert dlv.data == None, dlv.data
assert dlv.section_number == 0
assert dlv.section_offset == 0
assert dlv.condition == None
assert dlv.failed == self.failed
assert dlv.undeliverable == self.undeliverable
assert dlv.annotations == self.annotations, (dlv.annotations, self.annotations)
class CustomValue:
def __init__(self, data):
self.data = data
def apply(self, dlv):
dlv.data = self.data
def check(self, dlv):
assert dlv.data == self.data, (dlv.data, self.data)
assert dlv.section_number == 0
assert dlv.section_offset == 0
assert dlv.condition == None
assert dlv.failed == False
assert dlv.undeliverable == False
assert dlv.annotations == None
class DeliveryTest(Test):
def tearDown(self):
self.cleanup()
def testDisposition(self, count=1, tag="tag%i", type=Delivery.ACCEPTED, value=NoValue()):
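# round-trip 'count' deliveries, apply the given disposition type/value on each
# side, and check that the remote view matches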
snd, rcv = self.link("test-link")
snd.open()
rcv.open()
snd_deliveries = []
for i in range(count):
d = snd.delivery(tag % i)
snd_deliveries.append(d)
snd.advance()
rcv.flow(count)
self.pump()
rcv_deliveries = []
for i in range(count):
d = rcv.current
assert d.tag == (tag % i)
rcv_deliveries.append(d)
rcv.advance()
for d in rcv_deliveries:
value.apply(d.local)
d.update(type)
self.pump()
for d in snd_deliveries:
assert d.remote_state == type
assert d.remote.type == type
value.check(d.remote)
value.apply(d.local)
d.update(type)
self.pump()
for d in rcv_deliveries:
assert d.remote_state == type
assert d.remote.type == type
value.check(d.remote)
for d in snd_deliveries:
d.settle()
self.pump()
for d in rcv_deliveries:
assert d.settled, d.settled
d.settle()
def testReceived(self):
self.testDisposition(type=Disposition.RECEIVED, value=ReceivedValue(1, 2))
def testRejected(self):
self.testDisposition(type=Disposition.REJECTED, value=RejectValue(Condition(symbol("foo"))))
def testReleased(self):
self.testDisposition(type=Disposition.RELEASED)
def testModified(self):
self.testDisposition(type=Disposition.MODIFIED,
value=ModifiedValue(failed=True, undeliverable=True,
annotations={"key": "value"}))
def testCustom(self):
self.testDisposition(type=0x12345, value=CustomValue([1, 2, 3]))
class CollectorTest(Test):
def setUp(self):
self.collector = Collector()
def drain(self):
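# pop and return every event currently queued on the collector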
result = []
while True:
e = self.collector.peek()
if e:
result.append(e)
self.collector.pop()
else:
break
return result
def expect(self, *types):
return self.expect_oneof(types)
def expect_oneof(self, *sequences):
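# drain queued events and require their types to exactly match one of the given
# sequences; returns the single event, the event list, or None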
events = self.drain()
types = tuple([e.type for e in events])
for alternative in sequences:
if types == alternative:
if len(events) == 1:
return events[0]
elif len(events) > 1:
return events
else:
return
assert False, "actual events %s did not match any of the expected sequences: %s" % (events, sequences)
def expect_until(self, *types):
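# require the tail of the drained events to match the given type sequence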
events = self.drain()
etypes = tuple([e.type for e in events[-len(types):]])
assert etypes == types, "actual events %s did not end in expected sequence: %s" % (events, types)
class EventTest(CollectorTest):
def tearDown(self):
self.cleanup()
def testEndpointEvents(self):
c1, c2 = self.connection()
c1.collect(self.collector)
self.expect(Event.CONNECTION_INIT)
self.pump()
self.expect()
c2.open()
self.pump()
self.expect(Event.CONNECTION_REMOTE_OPEN)
self.pump()
self.expect()
ssn = c2.session()
snd = ssn.sender("sender")
ssn.open()
snd.open()
self.expect()
self.pump()
self.expect(Event.SESSION_INIT, Event.SESSION_REMOTE_OPEN,
Event.LINK_INIT, Event.LINK_REMOTE_OPEN)
c1.open()
ssn2 = c1.session()
ssn2.open()
rcv = ssn2.receiver("receiver")
rcv.open()
self.pump()
self.expect(Event.CONNECTION_LOCAL_OPEN, Event.TRANSPORT,
Event.SESSION_INIT, Event.SESSION_LOCAL_OPEN,
Event.TRANSPORT, Event.LINK_INIT, Event.LINK_LOCAL_OPEN,
Event.TRANSPORT)
rcv.close()
self.expect(Event.LINK_LOCAL_CLOSE, Event.TRANSPORT)
self.pump()
rcv.free()
del rcv
self.expect(Event.LINK_FINAL)
ssn2.free()
del ssn2
self.pump()
c1.free()
c1.transport.unbind()
self.expect_oneof((Event.SESSION_FINAL, Event.LINK_FINAL, Event.SESSION_FINAL,
Event.CONNECTION_UNBOUND, Event.CONNECTION_FINAL),
(Event.CONNECTION_UNBOUND, Event.SESSION_FINAL, Event.LINK_FINAL,
Event.SESSION_FINAL, Event.CONNECTION_FINAL))
def testConnectionINIT_FINAL(self):
c = Connection()
c.collect(self.collector)
self.expect(Event.CONNECTION_INIT)
c.free()
self.expect(Event.CONNECTION_FINAL)
def testSessionINIT_FINAL(self):
c = Connection()
c.collect(self.collector)
self.expect(Event.CONNECTION_INIT)
s = c.session()
self.expect(Event.SESSION_INIT)
s.free()
self.expect(Event.SESSION_FINAL)
c.free()
self.expect(Event.CONNECTION_FINAL)
def testLinkINIT_FINAL(self):
c = Connection()
c.collect(self.collector)
self.expect(Event.CONNECTION_INIT)
s = c.session()
self.expect(Event.SESSION_INIT)
r = s.receiver("asdf")
self.expect(Event.LINK_INIT)
r.free()
self.expect(Event.LINK_FINAL)
c.free()
self.expect(Event.SESSION_FINAL, Event.CONNECTION_FINAL)
def testFlowEvents(self):
snd, rcv = self.link("test-link")
snd.session.connection.collect(self.collector)
rcv.open()
rcv.flow(10)
self.pump()
self.expect(Event.CONNECTION_INIT, Event.SESSION_INIT,
Event.LINK_INIT, Event.LINK_REMOTE_OPEN, Event.LINK_FLOW)
rcv.flow(10)
self.pump()
self.expect(Event.LINK_FLOW)
return snd, rcv
def testDeliveryEvents(self):
snd, rcv = self.link("test-link")
rcv.session.connection.collect(self.collector)
rcv.open()
rcv.flow(10)
self.pump()
self.expect(Event.CONNECTION_INIT, Event.SESSION_INIT,
Event.LINK_INIT, Event.LINK_LOCAL_OPEN, Event.TRANSPORT)
snd.delivery("delivery")
snd.send(str2bin("Hello World!"))
snd.advance()
self.pump()
self.expect()
snd.open()
self.pump()
self.expect(Event.LINK_REMOTE_OPEN, Event.DELIVERY)
rcv.session.connection.transport.unbind()
rcv.session.connection.free()
self.expect(Event.CONNECTION_UNBOUND, Event.TRANSPORT, Event.LINK_FINAL,
Event.SESSION_FINAL, Event.CONNECTION_FINAL)
def testDeliveryEventsDisp(self):
snd, rcv = self.testFlowEvents()
snd.open()
dlv = snd.delivery("delivery")
snd.send(str2bin("Hello World!"))
assert snd.advance()
self.expect(Event.LINK_LOCAL_OPEN, Event.TRANSPORT)
self.pump()
self.expect(Event.LINK_FLOW)
rdlv = rcv.current
assert rdlv != None
assert rdlv.tag == "delivery"
rdlv.update(Delivery.ACCEPTED)
self.pump()
event = self.expect(Event.DELIVERY)
assert event.context == dlv, (dlv, event.context)
def testConnectionBOUND_UNBOUND(self):
c = Connection()
c.collect(self.collector)
self.expect(Event.CONNECTION_INIT)
t = Transport()
t.bind(c)
self.expect(Event.CONNECTION_BOUND)
t.unbind()
self.expect(Event.CONNECTION_UNBOUND, Event.TRANSPORT)
def testTransportERROR_CLOSE(self):
c = Connection()
c.collect(self.collector)
self.expect(Event.CONNECTION_INIT)
t = Transport()
t.bind(c)
self.expect(Event.CONNECTION_BOUND)
assert t.condition is None
t.push(str2bin("asdf"))
self.expect(Event.TRANSPORT_ERROR, Event.TRANSPORT_TAIL_CLOSED)
assert t.condition is not None
assert t.condition.name == "amqp:connection:framing-error"
assert "AMQP header mismatch" in t.condition.description
p = t.pending()
assert p > 0
t.pop(p)
self.expect(Event.TRANSPORT_HEAD_CLOSED, Event.TRANSPORT_CLOSED)
def testTransportCLOSED(self):
c = Connection()
c.collect(self.collector)
self.expect(Event.CONNECTION_INIT)
t = Transport()
t.bind(c)
c.open()
self.expect(Event.CONNECTION_BOUND, Event.CONNECTION_LOCAL_OPEN, Event.TRANSPORT)
c2 = Connection()
t2 = Transport()
t2.bind(c2)
c2.open()
c2.close()
pump(t, t2)
self.expect(Event.CONNECTION_REMOTE_OPEN, Event.CONNECTION_REMOTE_CLOSE,
Event.TRANSPORT_TAIL_CLOSED)
c.close()
pump(t, t2)
self.expect(Event.CONNECTION_LOCAL_CLOSE, Event.TRANSPORT,
Event.TRANSPORT_HEAD_CLOSED, Event.TRANSPORT_CLOSED)
def testLinkDetach(self):
c1 = Connection()
c1.collect(self.collector)
t1 = Transport()
t1.bind(c1)
c1.open()
s1 = c1.session()
s1.open()
l1 = s1.sender("asdf")
l1.open()
l1.detach()
self.expect_until(Event.LINK_LOCAL_DETACH, Event.TRANSPORT)
c2 = Connection()
c2.collect(self.collector)
t2 = Transport()
t2.bind(c2)
pump(t1, t2)
self.expect_until(Event.LINK_REMOTE_DETACH)
class PeerTest(CollectorTest):
def setUp(self):
CollectorTest.setUp(self)
self.connection = Connection()
self.connection.collect(self.collector)
self.transport = Transport()
self.transport.bind(self.connection)
self.peer = Connection()
self.peer_transport = Transport()
self.peer_transport.bind(self.peer)
self.peer_transport.trace(Transport.TRACE_OFF)
def pump(self):
pump(self.transport, self.peer_transport)
class TeardownLeakTest(PeerTest):
def doLeak(self, local, remote):
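# open connection/session/link, optionally close them locally and/or remotely,
# then free and unbind; every endpoint must still emit its FINAL event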
self.connection.open()
self.expect(Event.CONNECTION_INIT, Event.CONNECTION_BOUND,
Event.CONNECTION_LOCAL_OPEN, Event.TRANSPORT)
ssn = self.connection.session()
ssn.open()
self.expect(Event.SESSION_INIT, Event.SESSION_LOCAL_OPEN, Event.TRANSPORT)
snd = ssn.sender("sender")
snd.open()
self.expect(Event.LINK_INIT, Event.LINK_LOCAL_OPEN, Event.TRANSPORT)
self.pump()
self.peer.open()
self.peer.session_head(0).open()
self.peer.link_head(0).open()
self.pump()
self.expect_oneof((Event.CONNECTION_REMOTE_OPEN, Event.SESSION_REMOTE_OPEN,
Event.LINK_REMOTE_OPEN, Event.LINK_FLOW),
(Event.CONNECTION_REMOTE_OPEN, Event.SESSION_REMOTE_OPEN,
Event.LINK_REMOTE_OPEN))
if local:
snd.close() # ha!!
self.expect(Event.LINK_LOCAL_CLOSE, Event.TRANSPORT)
ssn.close()
self.expect(Event.SESSION_LOCAL_CLOSE, Event.TRANSPORT)
self.connection.close()
self.expect(Event.CONNECTION_LOCAL_CLOSE, Event.TRANSPORT)
if remote:
self.peer.link_head(0).close() # ha!!
self.peer.session_head(0).close()
self.peer.close()
self.pump()
if remote:
self.expect(Event.TRANSPORT_HEAD_CLOSED, Event.LINK_REMOTE_CLOSE,
Event.SESSION_REMOTE_CLOSE, Event.CONNECTION_REMOTE_CLOSE,
Event.TRANSPORT_TAIL_CLOSED, Event.TRANSPORT_CLOSED)
else:
self.expect(Event.TRANSPORT_HEAD_CLOSED, Event.SESSION_REMOTE_CLOSE,
Event.CONNECTION_REMOTE_CLOSE, Event.TRANSPORT_TAIL_CLOSED,
Event.TRANSPORT_CLOSED)
self.connection.free()
self.expect(Event.LINK_FINAL, Event.SESSION_FINAL)
self.transport.unbind()
self.expect(Event.CONNECTION_UNBOUND, Event.CONNECTION_FINAL)
def testLocalRemoteLeak(self):
self.doLeak(True, True)
def testLocalLeak(self):
self.doLeak(True, False)
def testRemoteLeak(self):
self.doLeak(False, True)
def testLeak(self):
self.doLeak(False, False)
class IdleTimeoutEventTest(PeerTest):
def half_pump(self):
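# discard whatever output is pending on the transport without delivering it to
# a peer (simulates an unresponsive server)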
p = self.transport.pending()
if p > 0:
self.transport.pop(p)
def testTimeoutWithZombieServer(self, expectOpenCloseFrames=True):
self.transport.idle_timeout = self.delay
self.connection.open()
self.half_pump()
self.transport.tick(time())
sleep(self.delay*2)
self.transport.tick(time())
self.expect(Event.CONNECTION_INIT, Event.CONNECTION_BOUND,
Event.CONNECTION_LOCAL_OPEN, Event.TRANSPORT,
Event.TRANSPORT_ERROR, Event.TRANSPORT_TAIL_CLOSED)
assert self.transport.capacity() < 0
if expectOpenCloseFrames:
assert self.transport.pending() > 0
self.half_pump()
self.expect(Event.TRANSPORT_HEAD_CLOSED, Event.TRANSPORT_CLOSED)
assert self.transport.pending() < 0
def testTimeoutWithZombieServerAndSASL(self):
sasl = self.transport.sasl()
self.testTimeoutWithZombieServer(expectOpenCloseFrames=False)
class DeliverySegFaultTest(Test):
def testDeliveryAfterUnbind(self):
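# regression check: creating a delivery after the transport has been bound and
# unbound must not crash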
conn = Connection()
t = Transport()
ssn = conn.session()
snd = ssn.sender("sender")
dlv = snd.delivery("tag")
dlv.settle()
del dlv
t.bind(conn)
t.unbind()
dlv = snd.delivery("tag")
class SaslEventTest(CollectorTest):
def testAnonymousNoInitialResponse(self):
if "java" in sys.platform:
raise Skipped()
conn = Connection()
conn.collect(self.collector)
transport = Transport(Transport.SERVER)
transport.bind(conn)
self.expect(Event.CONNECTION_INIT, Event.CONNECTION_BOUND)
transport.push(str2bin('AMQP\x03\x01\x00\x00\x00\x00\x00 \x02\x01\x00\x00\x00SA'
'\xd0\x00\x00\x00\x10\x00\x00\x00\x02\xa3\tANONYMOUS@'
'AMQP\x00\x01\x00\x00'))
self.expect(Event.TRANSPORT)
for i in range(1024):
p = transport.pending()
self.drain()
p = transport.pending()
self.expect()
def testPipelinedServerReadFirst(self):
if "java" in sys.platform:
raise Skipped()
conn = Connection()
conn.collect(self.collector)
transport = Transport(Transport.CLIENT)
s = transport.sasl()
s.allowed_mechs("ANONYMOUS PLAIN")
transport.bind(conn)
self.expect(Event.CONNECTION_INIT, Event.CONNECTION_BOUND)
transport.push(str2bin('AMQP\x03\x01\x00\x00\x00\x00\x00\x1c\x02\x01\x00\x00\x00S@'
'\xc0\x0f\x01\xe0\x0c\x01\xa3\tANONYMOUS\x00\x00\x00\x10'
'\x02\x01\x00\x00\x00SD\xc0\x03\x01P\x00AMQP\x00\x01\x00'
'\x00'))
self.expect(Event.TRANSPORT)
p = transport.pending()
bytes = transport.peek(p)
transport.pop(p)
server = Transport(Transport.SERVER)
server.push(bytes)
assert server.sasl().outcome == SASL.OK
def testPipelinedServerWriteFirst(self):
if "java" in sys.platform:
raise Skipped()
conn = Connection()
conn.collect(self.collector)
transport = Transport(Transport.CLIENT)
s = transport.sasl()
s.allowed_mechs("ANONYMOUS")
transport.bind(conn)
p = transport.pending()
bytes = transport.peek(p)
transport.pop(p)
self.expect(Event.CONNECTION_INIT, Event.CONNECTION_BOUND, Event.TRANSPORT)
transport.push(str2bin('AMQP\x03\x01\x00\x00\x00\x00\x00\x1c\x02\x01\x00\x00\x00S@'
'\xc0\x0f\x01\xe0\x0c\x01\xa3\tANONYMOUS\x00\x00\x00\x10'
'\x02\x01\x00\x00\x00SD\xc0\x03\x01P\x00AMQP\x00\x01\x00'
'\x00'))
self.expect(Event.TRANSPORT)
p = transport.pending()
bytes = transport.peek(p)
transport.pop(p)
# XXX: the bytes above appear to be correct, but we don't get any
# sort of event indicating that the transport is authenticated
| 28.816458 | 107 | 0.660332 |
7946d30e82212c9e486fe9a21d659274868ebddb | 587 | py | Python | output/models/nist_data/atomic/g_year_month/schema_instance/nistschema_sv_iv_atomic_g_year_month_max_exclusive_2_xsd/nistschema_sv_iv_atomic_g_year_month_max_exclusive_2.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/atomic/g_year_month/schema_instance/nistschema_sv_iv_atomic_g_year_month_max_exclusive_2_xsd/nistschema_sv_iv_atomic_g_year_month_max_exclusive_2.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/atomic/g_year_month/schema_instance/nistschema_sv_iv_atomic_g_year_month_max_exclusive_2_xsd/nistschema_sv_iv_atomic_g_year_month_max_exclusive_2.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import Optional
from xsdata.models.datatype import XmlPeriod
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-gYearMonth-maxExclusive-2-NS"
@dataclass
class NistschemaSvIvAtomicGYearMonthMaxExclusive2:
class Meta:
name = "NISTSchema-SV-IV-atomic-gYearMonth-maxExclusive-2"
namespace = "NISTSchema-SV-IV-atomic-gYearMonth-maxExclusive-2-NS"
value: Optional[XmlPeriod] = field(
default=None,
metadata={
"required": True,
"max_exclusive": XmlPeriod("1983-06"),
}
)
| 27.952381 | 74 | 0.696763 |
7946d74dda46d11ef61b21b248bd176363c3b8dd | 6,268 | py | Python | pymc3/step_methods/hmc/trajectory.py | vpolisky/pymc3 | 87cdd712c86321121c2ed3150764f3d847f5083c | [
"Apache-2.0"
] | null | null | null | pymc3/step_methods/hmc/trajectory.py | vpolisky/pymc3 | 87cdd712c86321121c2ed3150764f3d847f5083c | [
"Apache-2.0"
] | null | null | null | pymc3/step_methods/hmc/trajectory.py | vpolisky/pymc3 | 87cdd712c86321121c2ed3150764f3d847f5083c | [
"Apache-2.0"
] | 1 | 2021-04-13T10:31:26.000Z | 2021-04-13T10:31:26.000Z | from collections import namedtuple
from pymc3.theanof import join_nonshared_inputs, gradient, CallableTensor
import theano
import theano.tensor as tt
Hamiltonian = namedtuple("Hamiltonian", "logp, dlogp, pot")
def _theano_hamiltonian(model_vars, shared, logpt, potential):
"""Creates a Hamiltonian with shared inputs.
Parameters
----------
model_vars : array of variables to be sampled
shared : theano tensors that are already shared
logpt : model log probability
potential : hamiltonian potential
Returns
-------
Hamiltonian : namedtuple with log pdf, gradient of log pdf, and potential functions
q : Starting position variable.
"""
dlogp = gradient(logpt, model_vars)
(logp, dlogp), q = join_nonshared_inputs([logpt, dlogp], model_vars, shared)
dlogp_func = theano.function(inputs=[q], outputs=dlogp)
dlogp_func.trust_input = True
logp = CallableTensor(logp)
dlogp = CallableTensor(dlogp)
return Hamiltonian(logp, dlogp, potential), q, dlogp_func
def _theano_energy_function(H, q, **theano_kwargs):
"""Creates a Hamiltonian with shared inputs.
Parameters
----------
H : Hamiltonian namedtuple
q : theano variable, starting position
theano_kwargs : passed to theano.function
Returns
-------
energy_function : theano function that computes the energy at a point (p, q) in phase space
p : Starting momentum variable.
"""
p = tt.vector('p')
p.tag.test_value = q.tag.test_value
total_energy = H.pot.energy(p) - H.logp(q)
energy_function = theano.function(inputs=[q, p], outputs=total_energy, **theano_kwargs)
energy_function.trust_input = True
return energy_function, p
def _theano_leapfrog_integrator(H, q, p, **theano_kwargs):
"""Computes a theano function that computes one leapfrog step and the energy at the
end of the trajectory.
Parameters
----------
H : Hamiltonian
q : theano.tensor
p : theano.tensor
theano_kwargs : passed to theano.function
Returns
-------
theano function which returns
q_new, p_new, energy_new
"""
epsilon = tt.scalar('epsilon')
epsilon.tag.test_value = 1
n_steps = tt.iscalar('n_steps')
n_steps.tag.test_value = 2
q_new, p_new = leapfrog(H, q, p, epsilon, n_steps)
energy_new = energy(H, q_new, p_new)
f = theano.function([q, p, epsilon, n_steps], [q_new, p_new, energy_new], **theano_kwargs)
f.trust_input = True
return f
def get_theano_hamiltonian_functions(model_vars, shared, logpt, potential,
use_single_leapfrog=False, **theano_kwargs):
"""Construct theano functions for the Hamiltonian, energy, and leapfrog integrator.
Parameters
----------
model_vars : array of variables to be sampled
shared : theano tensors that are already shared
logpt : model log probability
potential : Hamiltonian potential
theano_kwargs : dictionary of keyword arguments to pass to theano functions
use_single_leapfrog : Boolean, if only 1 integration step is done at a time (as in NUTS),
this provides a ~2x speedup
Returns
-------
H : Hamiltonian namedtuple
energy_function : theano function computing energy at a point in phase space
leapfrog_integrator : theano function integrating the Hamiltonian from a point in phase space
theano_variables : dictionary of variables used in the computation graph which may be useful
"""
H, q, dlogp = _theano_hamiltonian(model_vars, shared, logpt, potential)
energy_function, p = _theano_energy_function(H, q, **theano_kwargs)
if use_single_leapfrog:
leapfrog_integrator = _theano_single_leapfrog(H, q, p, H.dlogp(q), **theano_kwargs)
else:
leapfrog_integrator = _theano_leapfrog_integrator(H, q, p, **theano_kwargs)
return H, energy_function, leapfrog_integrator, dlogp
def energy(H, q, p):
"""Compute the total energy for the Hamiltonian at a given position/momentum"""
return H.pot.energy(p) - H.logp(q)
def leapfrog(H, q, p, epsilon, n_steps):
"""Leapfrog integrator.
Estimates `p(t)` and `q(t)` at time :math:`t = n \cdot e`, by integrating the
Hamiltonian equations
.. math::
\frac{dq_i}{dt} = \frac{\partial H}{\partial p_i}
\frac{dp_i}{dt} = -\frac{\partial H}{\partial q_i}
with :math:`p(0) = p`, :math:`q(0) = q`
Parameters
----------
H : Hamiltonian instance.
Tuple of `logp, dlogp, potential`.
q : Theano.tensor
initial position vector
p : Theano.tensor
initial momentum vector
epsilon : float, step size
n_steps : int, number of iterations
Returns
-------
position : Theano.tensor
position estimate at time :math:`n \cdot e`.
momentum : Theano.tensor
momentum estimate at time :math:`n \cdot e`.
"""
def full_update(p, q):
p = p + epsilon * H.dlogp(q)
q += epsilon * H.pot.velocity(p)
return p, q
# This first line can't be +=, possibly because of theano
p = p + 0.5 * epsilon * H.dlogp(q) # half momentum update
q += epsilon * H.pot.velocity(p) # full position update
if tt.gt(n_steps, 1):
(p_seq, q_seq), _ = theano.scan(full_update, outputs_info=[p, q], n_steps=n_steps - 1)
p, q = p_seq[-1], q_seq[-1]
p += 0.5 * epsilon * H.dlogp(q) # half momentum update
return q, p
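# A minimal, self-contained sketch (not part of pymc3) of the same update
# order -- half momentum kick, position drift(s), final half kick -- for a 1D
# standard-normal target where dlogp(q) = -q and the kinetic-energy velocity
# is the identity. All names below are illustrative only.
def _leapfrog_plain_sketch(q, p, epsilon, n_steps):
    dlogp = lambda x: -x                  # gradient of log N(0, 1)
    velocity = lambda m: m                # identity mass matrix
    p = p + 0.5 * epsilon * dlogp(q)      # half momentum update
    q = q + epsilon * velocity(p)         # full position update
    for _ in range(int(n_steps) - 1):     # remaining full steps
        p = p + epsilon * dlogp(q)
        q = q + epsilon * velocity(p)
    p = p + 0.5 * epsilon * dlogp(q)      # final half momentum update
    return q, p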
def _theano_single_leapfrog(H, q, p, q_grad, **theano_kwargs):
"""Leapfrog integrator for a single step.
See above for documentation. This is optimized for the case where only a single step is
needed, in case of, for example, a recursive algorithm.
"""
epsilon = tt.scalar('epsilon')
epsilon.tag.test_value = 1.
p_new = p + 0.5 * epsilon * q_grad # half momentum update
q_new = q + epsilon * H.pot.velocity(p_new) # full position update
q_new_grad = H.dlogp(q_new)
p_new += 0.5 * epsilon * q_new_grad # half momentum update
energy_new = energy(H, q_new, p_new)
f = theano.function(inputs=[q, p, q_grad, epsilon],
outputs=[q_new, p_new, q_new_grad, energy_new], **theano_kwargs)
f.trust_input = True
return f
| 33.164021 | 97 | 0.662093 |
7946d8141892428550b4cf2a571578307e8dcfdd | 1,643 | py | Python | setup.py | catalystneuro/ndx-labmetadata-giocomo | 0371ef86be78eb017fcdaf28cc908d7a6b6c1a1f | [
"BSD-3-Clause"
] | null | null | null | setup.py | catalystneuro/ndx-labmetadata-giocomo | 0371ef86be78eb017fcdaf28cc908d7a6b6c1a1f | [
"BSD-3-Clause"
] | 1 | 2019-11-05T10:45:52.000Z | 2019-11-19T04:56:49.000Z | setup.py | catalystneuro/ndx-labmetadata-giocomo | 0371ef86be78eb017fcdaf28cc908d7a6b6c1a1f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
from shutil import copy2
# Get the long description from the README file
with open('README.md', 'r') as f:
long_description = f.read()
# Get requirements
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
setup_args = dict(
name='ndx-labmetadata-giocomo',
version='0.1.0',
description='NWB extension for storing metadata for Giocomo lab',
author='Luiz Tauffer, Szonja Weigl and Ben Dichter',
author_email='[email protected]',
url='https://github.com/catalystneuro/ndx-labmetadata-giocomo',
packages=find_packages('src/pynwb'),
package_dir={'': 'src/pynwb'},
include_package_data=True,
package_data={'ndx_labmetadata_giocomo': ['spec/ndx-labmetadata-giocomo.namespace.yaml',
'spec/ndx-labmetadata-giocomo.extensions.yaml']},
classifiers=["Intended Audience :: Developers",
"Intended Audience :: Science/Research"],
zip_safe=False,
install_requires=install_requires,
)
def _copy_spec_files(project_dir):
ns_path = os.path.join(project_dir, 'spec', 'ndx-labmetadata-giocomo.namespace.yaml')
ext_path = os.path.join(project_dir, 'spec', 'ndx-labmetadata-giocomo.extensions.yaml')
dst_dir = os.path.join(project_dir, 'src', 'pynwb', 'ndx_labmetadata_giocomo', 'spec')
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
copy2(ns_path, dst_dir)
copy2(ext_path, dst_dir)
if __name__ == '__main__':
_copy_spec_files(os.path.dirname(__file__))
setup(**setup_args)
| 32.215686 | 95 | 0.684723 |
7946d958ce438a2697165fcb4c6b74fbf665ad86 | 2,883 | py | Python | examples/ExecutionPools/Remote/tasks.py | kelliott55/testplan | 05d20033ac64a1dd6673ab6f70208b22ce17c545 | [
"Apache-2.0"
] | null | null | null | examples/ExecutionPools/Remote/tasks.py | kelliott55/testplan | 05d20033ac64a1dd6673ab6f70208b22ce17c545 | [
"Apache-2.0"
] | null | null | null | examples/ExecutionPools/Remote/tasks.py | kelliott55/testplan | 05d20033ac64a1dd6673ab6f70208b22ce17c545 | [
"Apache-2.0"
] | null | null | null | """TCP connections tests to be executed in parallel in a remote pool."""
import os
import time
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.common.utils.context import context
from testplan.testing.multitest.driver.tcp import TCPServer, TCPClient
def after_start(env):
"""
Called right after MultiTest starts.
"""
# Server accepts connection request made by client.
env.server.accept_connection()
@testsuite
class TCPTestsuite(object):
"""TCP communication tests."""
def __init__(self, files):
self._process_id = os.getpid()
self._files = files
def setup(self, env, result):
result.log("LOCAL_USER: {}".format(os.environ["LOCAL_USER"]))
for _file in self._files:
with open(_file) as fobj:
result.log("Source file contents: {}".format(fobj.read()))
@testcase
def send_and_receive_msg(self, env, result):
"""
Server-client communication with a sleep in the middle that
represents processing time by the server before responding.
"""
# Client sends a message.
msg = env.client.cfg.name
result.log(
"Client with process id {} is sending: {}".format(
self._process_id, msg
)
)
bytes_sent = env.client.send_text(msg)
received = env.server.receive_text(size=bytes_sent)
result.equal(received, msg, "Server received")
start_time = time.time()
# Sleeping here to represent a time consuming processing
# of the message received by the server before replying back.
time.sleep(1)
result.log(
"Server was processing message for {}s".format(
round(time.time() - start_time, 1)
)
)
response = "Hello {}".format(received)
result.log(
"Server with process id {} is responding: {}".format(
self._process_id, response
)
)
# Server sends the reply.
bytes_sent = env.server.send_text(response)
received = env.client.receive_text(size=bytes_sent)
result.equal(received, response, "Client received")
def make_multitest(index=0, files=None):
"""
Creates a new MultiTest that runs TCP connection tests.
This will be created inside a remote worker.
"""
print("Creating a MultiTest on process id {}.".format(os.getpid()))
test = MultiTest(
name="TCPMultiTest_{}".format(index),
suites=[TCPTestsuite(files)],
environment=[
TCPServer(name="server"),
TCPClient(
name="client",
host=context("server", "{{host}}"),
port=context("server", "{{port}}"),
),
],
after_start=after_start,
)
return test
| 30.670213 | 74 | 0.602151 |
7946d98924c3b6ea4269463247b8b55d968a3282 | 409 | py | Python | main/migrations/0002_activity_age_grade.py | cablespaghetti/running-club-challenge | 46bc289084c5c089154f456ac2b8901924653ead | [
"MIT"
] | null | null | null | main/migrations/0002_activity_age_grade.py | cablespaghetti/running-club-challenge | 46bc289084c5c089154f456ac2b8901924653ead | [
"MIT"
] | null | null | null | main/migrations/0002_activity_age_grade.py | cablespaghetti/running-club-challenge | 46bc289084c5c089154f456ac2b8901924653ead | [
"MIT"
] | null | null | null | # Generated by Django 4.0 on 2021-12-17 22:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='activity',
name='age_grade',
field=models.FloatField(default=50.0),
preserve_default=False,
),
]
| 20.45 | 50 | 0.584352 |
7946da2979f14377d08c1c1b154b71552f08e332 | 3,456 | py | Python | docusign_esign/models/credential.py | pivotal-energy-solutions/docusign-python-client | f3edd0b82e57999bc8848a63a0477712714ee437 | [
"MIT"
] | null | null | null | docusign_esign/models/credential.py | pivotal-energy-solutions/docusign-python-client | f3edd0b82e57999bc8848a63a0477712714ee437 | [
"MIT"
] | null | null | null | docusign_esign/models/credential.py | pivotal-energy-solutions/docusign-python-client | f3edd0b82e57999bc8848a63a0477712714ee437 | [
"MIT"
] | 1 | 2021-04-26T20:52:45.000Z | 2021-04-26T20:52:45.000Z | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Credential(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, type=None, value=None):
"""
Credential - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'type': 'str',
'value': 'str'
}
self.attribute_map = {
'type': 'type',
'value': 'value'
}
self._type = type
self._value = value
@property
def type(self):
"""
Gets the type of this Credential.
:return: The type of this Credential.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this Credential.
:param type: The type of this Credential.
:type: str
"""
self._type = type
@property
def value(self):
"""
Gets the value of this Credential.
Specifies the value of the tab.
:return: The value of this Credential.
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""
Sets the value of this Credential.
Specifies the value of the tab.
:param value: The value of this Credential.
:type: str
"""
self._value = value
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 24.510638 | 126 | 0.520255 |
7946db4208ecec6801584c7458f2f5294d3c9166 | 9,246 | py | Python | pyrolite/comp/codata.py | JustinGOSSES/pyrolite | 21eb5b28d9295625241b73b820fc8892b00fc6b0 | [
"BSD-3-Clause"
] | 1 | 2020-03-13T07:11:47.000Z | 2020-03-13T07:11:47.000Z | pyrolite/comp/codata.py | JustinGOSSES/pyrolite | 21eb5b28d9295625241b73b820fc8892b00fc6b0 | [
"BSD-3-Clause"
] | null | null | null | pyrolite/comp/codata.py | JustinGOSSES/pyrolite | 21eb5b28d9295625241b73b820fc8892b00fc6b0 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pandas as pd
import scipy.stats as scpstats
import scipy.special as scpspec
# from .renorm import renormalise, close
from ..util.math import orthogonal_basis_default, orthogonal_basis_from_array
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
def close(X: np.ndarray, sumf=np.sum):
"""
Closure operator for compositional data.
Parameters
-----------
X : :class:`numpy.ndarray`
Array to close.
sumf : :class:`callable`, :func:`numpy.sum`
Sum function to use for closure.
Returns
--------
:class:`numpy.ndarray`
Closed array.
Notes
------
* Does not check for non-positive entries.
"""
if X.ndim == 2:
return np.divide(X, sumf(X, axis=1)[:, np.newaxis])
else:
return np.divide(X, sumf(X, axis=0))
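# Illustrative check (values rounded): each row of a closed 2D array sums to 1.
# >>> close(np.array([[1.0, 2.0, 3.0], [2.0, 2.0, 4.0]]))
# array([[0.167, 0.333, 0.5  ],
#        [0.25 , 0.25 , 0.5  ]])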
def renormalise(df: pd.DataFrame, components: list = [], scale=100.0):
"""
Renormalises compositional data to ensure closure.
Parameters
------------
df : :class:`pandas.DataFrame`
Dataframe to renomalise.
components : :class:`list`
Optional subcomposition to renormalise to 100. Useful for the use case
where compositional data and non-compositional data are stored in the
same dataframe.
scale : :class:`float`, :code:`100.`
Closure parameter. Typically either 100 or 1.
Returns
--------
:class:`pandas.DataFrame`
Renormalized dataframe.
"""
dfc = df.copy(deep=True)
if components:
cmpnts = [c for c in components if c in dfc.columns]
dfc.loc[:, cmpnts] = scale * dfc.loc[:, cmpnts].divide(
dfc.loc[:, cmpnts].sum(axis=1).replace(0, np.nan), axis=0
)
return dfc
else:
dfc = dfc.divide(dfc.sum(axis=1).replace(0, 100.0), axis=0) * scale
return dfc
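# Illustrative usage (column names are hypothetical): rows are rescaled to sum
# to the closure parameter, 100 by default.
# >>> df = pd.DataFrame({"SiO2": [45.0, 50.0], "MgO": [5.0, 10.0]})
# >>> renormalise(df).sum(axis=1)
# 0    100.0
# 1    100.0
# dtype: float64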
def alr(X: np.ndarray, ind: int = -1, null_col=False):
"""
Additive Log Ratio transformation.
Parameters
---------------
X: :class:`numpy.ndarray`
Array on which to perform the transformation, of shape :code:`(N, D)`.
ind: :class:`int`
Index of column used as denominator.
null_col : :class:`bool`
Whether to keep the redundant column.
Returns
---------
:class:`numpy.ndarray`
ALR-transformed array, of shape :code:`(N, D-1)`.
"""
Y = X.copy()
assert Y.ndim in [1, 2]
dimensions = Y.shape[Y.ndim - 1]
if ind < 0:
ind += dimensions
if Y.ndim == 2:
Y = np.divide(Y, Y[:, ind][:, np.newaxis])
if not null_col:
Y = Y[:, [i for i in range(dimensions) if not i == ind]]
else:
Y = np.divide(X, X[ind])
if not null_col:
Y = Y[[i for i in range(dimensions) if not i == ind]]
return np.log(Y)
def inverse_alr(Y: np.ndarray, ind=-1, null_col=False):
"""
Inverse Additive Log Ratio transformation.
Parameters
---------------
Y : :class:`numpy.ndarray`
Array on which to perform the inverse transformation, of shape :code:`(N, D-1)`.
ind : :class:`int`
Index of column used as denominator.
null_col : :class:`bool`, :code:`False`
Whether the array contains an extra redundant column
(i.e. shape is :code:`(N, D)`).
Returns
--------
:class:`numpy.ndarray`
Inverse-ALR transformed array, of shape :code:`(N, D)`.
"""
assert Y.ndim in [1, 2]
X = Y.copy()
dimensions = X.shape[X.ndim - 1]
if not null_col:
idx = np.arange(0, dimensions + 1)
if ind != -1:
idx = np.array(list(idx[idx < ind]) + [-1] + list(idx[idx >= ind + 1] - 1))
# Add a zero-column and reorder columns
if Y.ndim == 2:
X = np.concatenate((X, np.zeros((X.shape[0], 1))), axis=1)
X = X[:, idx]
else:
X = np.append(X, np.array([0]))
X = X[idx]
# Inverse log and closure operations
X = np.exp(X)
X = close(X)
return X
def clr(X: np.ndarray):
"""
Centred Log Ratio transformation.
Parameters
---------------
X : :class:`numpy.ndarray`
Array on which to perform the transformation, of shape :code:`(N, D)`.
Returns
---------
:class:`numpy.ndarray`
CLR-transformed array, of shape :code:`(N, D)`.
"""
X = np.divide(X, np.sum(X, axis=1)[:, np.newaxis]) # Closure operation
Y = np.log(X) # Log operation
Y -= 1 / X.shape[1] * np.nansum(Y, axis=1)[:, np.newaxis]
return Y
def inverse_clr(Y: np.ndarray):
"""
Inverse Centred Log Ratio transformation.
Parameters
---------------
Y : :class:`numpy.ndarray`
Array on which to perform the inverse transformation, of shape :code:`(N, D)`.
Returns
---------
:class:`numpy.ndarray`
Inverse-CLR transformed array, of shape :code:`(N, D)`.
"""
# Inverse of log operation
X = np.exp(Y)
# Closure operation
X = np.divide(X, np.nansum(X, axis=1)[:, np.newaxis])
return X
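# Round-trip sketch: CLR followed by its inverse recovers the closed
# composition.
# >>> inverse_clr(clr(np.array([[0.2, 0.3, 0.5]])))
# array([[0.2, 0.3, 0.5]])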
def ilr(X: np.ndarray):
"""
Isometric Log Ratio transformation.
Parameters
---------------
X : :class:`numpy.ndarray`
Array on which to perform the transformation, of shape :code:`(N, D)`.
Returns
--------
:class:`numpy.ndarray`
ILR-transformed array, of shape :code:`(N, D-1)`.
"""
d = X.shape[1]
Y = clr(X)
psi = orthogonal_basis_from_array(X) # Get a basis
assert np.allclose(psi @ psi.T, np.eye(d - 1))
return Y @ psi.T
def inverse_ilr(Y: np.ndarray, X: np.ndarray = None):
"""
Inverse Isometric Log Ratio transformation.
Parameters
---------------
Y : :class:`numpy.ndarray`
Array on which to perform the inverse transformation, of shape :code:`(N, D-1)`.
X : :class:`numpy.ndarray`, :code:`None`
Optional specification for an array from which to derive the orthonormal basis,
with shape :code:`(N, D)`.
Returns
--------
:class:`numpy.ndarray`
Inverse-ILR transformed array, of shape :code:`(N, D)`.
"""
if X is None:
psi = orthogonal_basis_default(D=Y.shape[1] + 1)
else:
psi = orthogonal_basis_from_array(X)
C = Y @ psi
X = inverse_clr(C) # Inverse log operation
return X
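# Round-trip sketch: ILR maps D parts to D-1 orthonormal coordinates; passing
# the same array to inverse_ilr (so the same basis is used) maps them back.
# >>> X = np.array([[0.2, 0.3, 0.5]])
# >>> inverse_ilr(ilr(X), X=X)   # ~array([[0.2, 0.3, 0.5]])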
def boxcox(
X: np.ndarray,
lmbda=None,
lmbda_search_space=(-1, 5),
search_steps=100,
return_lmbda=False,
):
"""
Box-Cox transformation.
Parameters
---------------
X : :class:`numpy.ndarray`
Array on which to perform the transformation.
lmbda : :class:`numpy.number`, :code:`None`
Lambda value used to forward-transform values. If None, it is estimated by
maximising the Box-Cox log-likelihood over `lmbda_search_space`.
lmbda_search_space : :class:`tuple`
Range tuple (min, max).
search_steps : :class:`int`
Steps for lambda search range.
return_lmbda : :class:`bool`
Whether to also return the lambda value.
Returns
-------
:class:`numpy.ndarray` | :class:`numpy.ndarray`(:class:`float`)
Box-Cox transformed array. If `return_lmbda` is true, tuple contains data and
lambda value.
"""
if isinstance(X, pd.DataFrame) or isinstance(X, pd.Series):
_X = X.values
else:
_X = X.copy()
if lmbda is None:
l_search = np.linspace(*lmbda_search_space, search_steps)
llf = np.apply_along_axis(scpstats.boxcox_llf, 0, np.array([l_search]), _X.T)
if llf.shape[0] == 1:
mean_llf = llf[0]
else:
mean_llf = np.nansum(llf, axis=0)
lmbda = l_search[mean_llf == np.nanmax(mean_llf)]
if _X.ndim < 2:
out = scpstats.boxcox(_X, lmbda)
elif _X.shape[0] == 1:
out = scpstats.boxcox(np.squeeze(_X), lmbda)
else:
out = np.apply_along_axis(scpstats.boxcox, 0, _X, lmbda)
if isinstance(_X, pd.DataFrame) or isinstance(_X, pd.Series):
_out = X.copy()
_out.loc[:, :] = out
out = _out
if return_lmbda:
return out, lmbda
else:
return out
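# Sanity check: with lmbda fixed at 0 the Box-Cox transform reduces to the
# natural logarithm.
# >>> boxcox(np.array([1.0, np.e]), lmbda=0.0)
# array([0., 1.])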
def inverse_boxcox(Y: np.ndarray, lmbda):
"""
Inverse Box-Cox transformation.
Parameters
---------------
Y : :class:`numpy.ndarray`
Array on which to perform the transformation.
lmbda : :class:`float`
Lambda value used to forward-transform values.
Returns
-------
:class:`numpy.ndarray`
Inverse Box-Cox transformed array.
"""
return scpspec.inv_boxcox(Y, lmbda)
def logratiomean(df, transform=clr, inverse_transform=inverse_clr):
"""
Take a mean of log-ratios along the index of a dataframe.
Parameters
-----------
df : :class:`pandas.DataFrame`
Dataframe from which to compute a mean along the index.
transform : :class:`callable`
Log transform to use.
inverse_transform : :class:`callable`
Inverse of log transform.
Returns
---------
:class:`pandas.Series`
Mean values as a pandas series.
"""
return pd.Series(
inverse_transform(np.mean(transform(df.values), axis=0)[np.newaxis, :])[0],
index=df.columns,
)
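# Illustrative usage (column names are hypothetical): the log-ratio mean of two
# mirrored compositions is the closed geometric mean of each column.
# >>> df = pd.DataFrame([[0.2, 0.8], [0.8, 0.2]], columns=["A", "B"])
# >>> logratiomean(df)
# A    0.5
# B    0.5
# dtype: float64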
| 26.8 | 88 | 0.578953 |
7946dc1f42a18eb7e6cefea59ca3edb384bf7eca | 27,016 | py | Python | tf_quant_finance/models/hjm/swaption_pricing.py | slowy07/tf-quant-finance | 0976f720fb58a2d7bfd863640c12a2425cd2f94f | [
"Apache-2.0"
] | 3,138 | 2019-07-24T21:43:17.000Z | 2022-03-30T12:11:09.000Z | tf_quant_finance/models/hjm/swaption_pricing.py | slowy07/tf-quant-finance | 0976f720fb58a2d7bfd863640c12a2425cd2f94f | [
"Apache-2.0"
] | 63 | 2019-09-07T19:16:03.000Z | 2022-03-29T19:29:40.000Z | tf_quant_finance/models/hjm/swaption_pricing.py | slowy07/tf-quant-finance | 0976f720fb58a2d7bfd863640c12a2425cd2f94f | [
"Apache-2.0"
] | 423 | 2019-07-26T21:28:05.000Z | 2022-03-26T13:07:44.000Z | # Lint as: python3
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pricing of the Interest rate Swaption using the HJM model."""
from typing import Callable, Union
import tensorflow.compat.v2 as tf
from tf_quant_finance import types
from tf_quant_finance.math import pde
from tf_quant_finance.math import random
from tf_quant_finance.models import utils
from tf_quant_finance.models import valuation_method as vm
from tf_quant_finance.models.hjm import gaussian_hjm
from tf_quant_finance.models.hjm import quasi_gaussian_hjm
from tf_quant_finance.models.hjm import swaption_util
__all__ = [
'price'
]
# Points smaller than this are merged together in FD time grid
_PDE_TIME_GRID_TOL = 1e-7
def price(
*,
expiries: types.RealTensor,
fixed_leg_payment_times: types.RealTensor,
fixed_leg_daycount_fractions: types.RealTensor,
fixed_leg_coupon: types.RealTensor,
reference_rate_fn: Callable[..., types.RealTensor],
num_hjm_factors: int,
mean_reversion: types.RealTensor,
volatility: Union[types.RealTensor, Callable[..., types.RealTensor]],
times: types.RealTensor = None,
time_step: types.RealTensor = None,
num_time_steps: types.IntTensor = None,
curve_times: types.RealTensor = None,
corr_matrix: types.RealTensor = None,
notional: types.RealTensor = None,
is_payer_swaption: types.BoolTensor = None,
valuation_method: vm.ValuationMethod = vm.ValuationMethod.MONTE_CARLO,
num_samples: types.IntTensor = 1,
random_type: random.RandomType = None,
seed: types.IntTensor = None,
skip: types.IntTensor = 0,
time_step_finite_difference: types.IntTensor = None,
num_time_steps_finite_difference: types.IntTensor = None,
num_grid_points_finite_difference: types.IntTensor = 101,
dtype: tf.DType = None,
name: str = None) -> types.RealTensor:
"""Calculates the price of European swaptions using the HJM model.
A European Swaption is a contract that gives the holder an option to enter a
swap contract at a future date at a prespecified fixed rate. A swaption that
grants the holder the right to pay fixed rate and receive floating rate is
called a payer swaption while the swaption that grants the holder the right to
receive fixed and pay floating payments is called the receiver swaption.
Typically the start date (or the inception date) of the swap coincides with
the expiry of the swaption. Mid-curve swaptions are currently not supported
(b/160061740).
This implementation uses the HJM model to numerically value European
swaptions. For more information on the formulation of the HJM model, see
quasi_gaussian_hjm.py.
#### Example
````python
import numpy as np
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
dtype = tf.float64
# Price 1y x 1y swaption with quarterly payments using Monte Carlo
# simulations.
expiries = np.array([1.0])
fixed_leg_payment_times = np.array([1.25, 1.5, 1.75, 2.0])
fixed_leg_daycount_fractions = 0.25 * np.ones_like(fixed_leg_payment_times)
fixed_leg_coupon = 0.011 * np.ones_like(fixed_leg_payment_times)
zero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)
mean_reversion = [0.03]
volatility = [0.02]
price = tff.models.hjm.swaption_price(
expiries=expiries,
fixed_leg_payment_times=fixed_leg_payment_times,
fixed_leg_daycount_fractions=fixed_leg_daycount_fractions,
fixed_leg_coupon=fixed_leg_coupon,
reference_rate_fn=zero_rate_fn,
notional=100.,
num_hjm_factors=1,
mean_reversion=mean_reversion,
volatility=volatility,
valuation_method=tff.models.ValuationMethod.MONTE_CARLO,
num_samples=500000,
time_step=0.1,
random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
seed=[1, 2])
# Expected value: [[0.716]]
````
#### References:
[1]: D. Brigo, F. Mercurio. Interest Rate Models-Theory and Practice.
Second Edition. 2007. Section 6.7, page 237.
Args:
expiries: A real `Tensor` of any shape and dtype. The time to expiration of
the swaptions. The shape of this input along with the batch shape of the
HJM model determines the number (and shape) of swaptions to be priced and
the shape of the output. If the batch shape of HJM models is
`model_batch_shape`, then the leading dimensions of `expiries` must be
broadcastable to `model_batch_shape`. For example, if the rank of
`model_batch_shape` is `n` and the rank of `expiries.shape` is `m`, then
`m>=n` and the leading `n` dimensions of `expiries.shape` must be
broadcastable to `model_batch_shape`.
fixed_leg_payment_times: A real `Tensor` of the same dtype as `expiries`.
The payment times for each payment in the fixed leg. The shape of this
input should be `expiries.shape + [n]` where `n` denotes the number of
fixed payments in each leg. The `fixed_leg_payment_times` should be
greater-than or equal-to the corresponding expiries.
fixed_leg_daycount_fractions: A real `Tensor` of the same dtype and
compatible shape as `fixed_leg_payment_times`. The daycount fractions for
each payment in the fixed leg.
fixed_leg_coupon: A real `Tensor` of the same dtype and compatible shape as
`fixed_leg_payment_times`. The fixed rate for each payment in the fixed
leg.
reference_rate_fn: A Python callable that accepts expiry time as a real
`Tensor` and returns a `Tensor` of shape
`model_batch_shape + input_shape`. Returns the continuously compounded
zero rate at the present time for the input expiry time.
num_hjm_factors: A Python scalar which corresponds to the number of factors
in the batch of HJM models to be used for pricing.
mean_reversion: A real positive `Tensor` of shape
`model_batch_shape + [num_hjm_factors]`.
Corresponds to the mean reversion rate of each factor in the batch.
volatility: A real positive `Tensor` of the same `dtype` and shape as
`mean_reversion` or a callable with the following properties:
(a) The callable should accept a scalar `Tensor` `t` and a `Tensor`
`r(t)` of shape `batch_shape + [num_samples]` and return a `Tensor` of
shape compatible with `batch_shape + [num_samples, dim]`. The variable
`t` stands for time and `r(t)` is the short rate at time `t`. The
function returns instantaneous volatility `sigma(t) = sigma(t, r(t))`.
When `volatility` is specified as a real `Tensor`, each factor is
assumed to have a constant instantaneous volatility and the model is
effectively a Gaussian HJM model.
Corresponds to the instantaneous volatility of each factor.
times: An optional rank 1 `Tensor` of increasing positive real values. The
times at which Monte Carlo simulations are performed. Relevant when
swaption valuation is done using Monte Calro simulations.
Default value: `None` in which case simulation times are computed based
on either `time_step` or `num_time_steps` inputs.
time_step: Optional scalar real `Tensor`. Maximal distance between time
grid points in Euler scheme. Relevant when Euler scheme is used for
simulation. This input or `num_time_steps` are required when valuation
method is Monte Carlo.
Default Value: `None`.
num_time_steps: An optional scalar integer `Tensor` - a total number of
time steps during Monte Carlo simulations. The maximal distance between
points in grid is bounded by
`times[-1] / (num_time_steps - times.shape[0])`.
Either this or `time_step` should be supplied when the valuation method
is Monte Carlo.
Default value: `None`.
curve_times: An optional rank 1 `Tensor` of positive and increasing real
values. The maturities at which spot discount curve is computed during
simulations.
Default value: `None` in which case `curve_times` is computed based on
swaption expiries and `fixed_leg_payments_times` inputs.
corr_matrix: A `Tensor` of shape `[num_hjm_factors, num_hjm_factors]` and
the same `dtype` as `mean_reversion`. Specifies the correlation between
HJM factors.
Default value: `None` in which case the factors are assumed to be
uncorrelated.
notional: An optional `Tensor` of same dtype and compatible shape as
`expiries` specifying the notional amount for the underlying swaps.
Default value: None in which case the notional is set to 1.
is_payer_swaption: A boolean `Tensor` of a shape compatible with `expiries`.
Indicates whether the swaption is a payer (if True) or a receiver (if
False) swaption. If not supplied, payer swaptions are assumed.
valuation_method: An enum of type `ValuationMethod` specifying
the method to be used for swaption valuation. Currently the valuation is
supported using `MONTE_CARLO` and `FINITE_DIFFERENCE` methods. Valuation
using finite difference is only supported for Gaussian HJM models, i.e.
for models with constant mean-reversion rate and time-dependent
volatility.
Default value: `ValuationMethod.MONTE_CARLO`, in which case
swaption valuation is done using Monte Carlo simulations.
num_samples: Positive scalar `int32` `Tensor`. The number of simulation
paths during Monte-Carlo valuation. This input is ignored during analytic
valuation.
Default value: The default value is 1.
random_type: Enum value of `RandomType`. The type of (quasi)-random number
generator to use to generate the simulation paths. This input is relevant
only for Monte-Carlo valuation and ignored during analytic valuation.
Default value: `None` which maps to the standard pseudo-random numbers.
seed: Seed for the random number generator. The seed is only relevant if
`random_type` is one of `[STATELESS, PSEUDO, HALTON_RANDOMIZED,
PSEUDO_ANTITHETIC, STATELESS_ANTITHETIC]`. For `PSEUDO`,
`PSEUDO_ANTITHETIC` and `HALTON_RANDOMIZED` the seed should be an Python
integer. For `STATELESS` and `STATELESS_ANTITHETIC` must be supplied as
an integer `Tensor` of shape `[2]`. This input is relevant only for
Monte-Carlo valuation and ignored during analytic valuation.
Default value: `None` which means no seed is set.
skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or
Halton sequence to skip. Used only when `random_type` is 'SOBOL',
'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.
Default value: `0`.
time_step_finite_difference: Optional scalar real `Tensor`. Spacing between
time grid points in finite difference discretization. This input is only
relevant for valuation using finite difference.
Default value: `None`. If `num_time_steps_finite_difference` is also
unspecified then a `time_step` corresponding to 100 discretization steps
is used.
num_time_steps_finite_difference: Optional scalar real `Tensor`. Number of
time grid points in finite difference discretization. This input is only
relevant for valuation using finite difference.
Default value: `None`. If `time_step_finite_difference` is also
unspecified, then 100 time steps are used.
num_grid_points_finite_difference: Optional scalar real `Tensor`. Number of
spatial grid points per dimension. Currently, we construct a uniform grid
for spatial discretization. This input is only relevant for valuation
using finite difference.
Default value: 101.
dtype: The default dtype to use when converting values to `Tensor`s.
Default value: `None` which means that default dtypes inferred by
TensorFlow are used.
name: Python string. The name to give to the ops created by this function.
Default value: `None` which maps to the default name `hjm_swaption_price`.
Returns:
A `Tensor` of real dtype and shape derived from `model_batch_shape` and
expiries.shape containing the computed swaption prices. The shape of the
output is as follows:
* If the `model_batch_shape` is [], then the shape of the output is
expiries.shape
* Otherwise, the shape of the output is
`model_batch_shape + expiries.shape[model_batch_shape.rank:]`
For swaptions that have reset in the past (expiries<0), the function sets
the corresponding option prices to 0.0.
"""
# TODO(b/160061740): Extend the functionality to support mid-curve swaptions.
name = name or 'hjm_swaption_price'
with tf.name_scope(name):
expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')
dtype = dtype or expiries.dtype
fixed_leg_payment_times = tf.convert_to_tensor(
fixed_leg_payment_times, dtype=dtype, name='fixed_leg_payment_times')
fixed_leg_daycount_fractions = tf.convert_to_tensor(
fixed_leg_daycount_fractions,
dtype=dtype,
name='fixed_leg_daycount_fractions')
fixed_leg_coupon = tf.convert_to_tensor(
fixed_leg_coupon, dtype=dtype, name='fixed_leg_coupon')
notional = tf.convert_to_tensor(notional, dtype=dtype, name='notional')
if is_payer_swaption is None:
is_payer_swaption = True
is_payer_swaption = tf.convert_to_tensor(
is_payer_swaption, dtype=tf.bool, name='is_payer_swaption')
if expiries.shape.rank < fixed_leg_payment_times.shape.rank - 1:
raise ValueError('Swaption expiries not specified for all swaptions '
'in the batch. Expected rank {} but received {}.'.format(
fixed_leg_payment_times.shape.rank - 1,
expiries.shape.rank))
# Add a dimension corresponding to multiple cashflows in a swap
expiries = tf.expand_dims(expiries, axis=-1)
# Expected shape: batch_shape + [m], where m is the number of fixed leg
# payments per underlying swap. This is the same as
# fixed_leg_payment_times.shape
#
# We need to explicitly use tf.repeat because we need to price
# batch_shape + [m] bond options with different strikes along the last
# dimension.
expiries = tf.repeat(
expiries, tf.shape(fixed_leg_payment_times)[-1], axis=-1)
if valuation_method == vm.ValuationMethod.FINITE_DIFFERENCE:
model = gaussian_hjm.GaussianHJM(
num_hjm_factors,
mean_reversion=mean_reversion,
volatility=volatility,
initial_discount_rate_fn=reference_rate_fn,
corr_matrix=corr_matrix,
dtype=dtype)
# TODO(b/192294347): Enable pricing using batch of HJM models.
if reference_rate_fn(tf.constant([0.0], dtype=dtype)).shape.rank > 1:
raise ValueError('Pricing swaptions using a batch of HJM models with '
'finite differences is not currently supported.')
instrument_batch_shape = expiries.shape.as_list()[:-1] or [1]
return _european_swaption_fd(
instrument_batch_shape,
model,
# Add a dimension to denote ONE exercise date
tf.expand_dims(expiries, axis=-2),
fixed_leg_payment_times,
fixed_leg_daycount_fractions,
fixed_leg_coupon,
notional,
is_payer_swaption,
time_step_finite_difference,
num_time_steps_finite_difference,
num_grid_points_finite_difference,
name + '_fd',
dtype)
elif valuation_method == vm.ValuationMethod.MONTE_CARLO:
# Monte-Carlo pricing
model = quasi_gaussian_hjm.QuasiGaussianHJM(
num_hjm_factors,
mean_reversion=mean_reversion,
volatility=volatility,
initial_discount_rate_fn=reference_rate_fn,
corr_matrix=corr_matrix,
dtype=dtype)
return _european_swaption_mc(
model, expiries, fixed_leg_payment_times,
fixed_leg_daycount_fractions, fixed_leg_coupon, notional,
is_payer_swaption, times, time_step, num_time_steps, curve_times,
num_samples, random_type, skip, seed, dtype, name + '_mc')
else:
raise ValueError('Swaption Valuation using {} is not supported'.format(
str(valuation_method)))
def _european_swaption_mc(model, expiries,
fixed_leg_payment_times, fixed_leg_daycount_fractions,
fixed_leg_coupon, notional, is_payer_swaption, times,
time_step, num_time_steps, curve_times, num_samples,
random_type, skip, seed, dtype, name):
"""Price European swaptions using Monte-Carlo."""
with tf.name_scope(name):
if (times is None) and (time_step is None) and (num_time_steps is None):
raise ValueError(
'One of `times`, `time_step` or `num_time_steps` must be '
'provided for simulation based swaption valuation.')
def _sample_discount_curve_path_fn(times, curve_times, num_samples):
p_t_tau, r_t, df = model.sample_discount_curve_paths(
times=times,
curve_times=curve_times,
num_samples=num_samples,
random_type=random_type,
time_step=time_step,
num_time_steps=num_time_steps,
seed=seed,
skip=skip)
p_t_tau = tf.expand_dims(p_t_tau, axis=-1)
r_t = tf.expand_dims(r_t, axis=-1)
df = tf.expand_dims(df, axis=-1)
return p_t_tau, r_t, df
payoff_discount_factors, payoff_bond_price = (
swaption_util.discount_factors_and_bond_prices_from_samples(
expiries=expiries,
payment_times=fixed_leg_payment_times,
sample_discount_curve_paths_fn=_sample_discount_curve_path_fn,
num_samples=num_samples,
times=times,
curve_times=curve_times,
dtype=dtype))
# Add an axis corresponding to `dim`
fixed_leg_pv = tf.expand_dims(
fixed_leg_coupon * fixed_leg_daycount_fractions,
axis=-1) * payoff_bond_price
# Sum fixed coupon payments within each swap.
# Here, axis=-2 is the payments axis - i.e. summing over all payments; and
# the last axis is the `dim` axis, as explained in comment above
# `fixed_leg_pv` (Note that for HJM the dim of this axis is 1 always).
fixed_leg_pv = tf.math.reduce_sum(fixed_leg_pv, axis=-2)
float_leg_pv = 1.0 - payoff_bond_price[..., -1, :]
payoff_swap = payoff_discount_factors[..., -1, :] * (
float_leg_pv - fixed_leg_pv)
payoff_swap = tf.where(is_payer_swaption, payoff_swap, -1.0 * payoff_swap)
payoff_swaption = tf.math.maximum(payoff_swap, 0.0)
# Average over all simulation paths
option_value = tf.math.reduce_mean(payoff_swaption, axis=0)
return notional * tf.squeeze(option_value, axis=-1)
def _european_swaption_fd(batch_shape, model, exercise_times,
fixed_leg_payment_times, fixed_leg_daycount_fractions,
fixed_leg_coupon, notional, is_payer_swaption,
time_step_fd, num_time_steps_fd, num_grid_points_fd,
name, dtype):
"""Price European swaptions using finite difference."""
with tf.name_scope(name):
dim = model.dim()
x_min = -0.5
x_max = 0.5
# grid.shape = (num_grid_points,2)
grid = pde.grids.uniform_grid(
minimums=[x_min] * dim,
maximums=[x_max] * dim,
sizes=[num_grid_points_fd] * dim,
dtype=dtype)
# TODO(b/186876306): Remove dynamic shapes.
pde_time_grid, pde_time_grid_dt = _create_pde_time_grid(
exercise_times, time_step_fd, num_time_steps_fd, dtype)
maturities, unique_maturities, maturities_shape = (
_create_term_structure_maturities(fixed_leg_payment_times))
num_maturities = tf.shape(unique_maturities)[-1]
x_meshgrid = _coord_grid_to_mesh_grid(grid)
meshgrid_shape = tf.shape(x_meshgrid)
broadcasted_maturities = tf.expand_dims(unique_maturities, axis=0)
num_grid_points = tf.math.reduce_prod(meshgrid_shape[1:])
shape_to_broadcast = tf.concat([meshgrid_shape, [num_maturities]], axis=0)
# Reshape `state_x`, `maturities` to (num_grid_points, num_maturities)
state_x = tf.expand_dims(x_meshgrid, axis=-1)
state_x = tf.broadcast_to(state_x, shape_to_broadcast)
broadcasted_maturities = tf.broadcast_to(broadcasted_maturities,
shape_to_broadcast[1:])
def _get_swap_payoff(payoff_time):
broadcasted_exercise_times = tf.broadcast_to(payoff_time,
shape_to_broadcast[1:])
# Zero-coupon bond curve
zcb_curve = model.discount_bond_price(
tf.transpose(
tf.reshape(state_x, [dim, num_grid_points * num_maturities])),
tf.reshape(broadcasted_exercise_times, [-1]),
tf.reshape(broadcasted_maturities, [-1]))
zcb_curve = tf.reshape(zcb_curve, [num_grid_points, num_maturities])
maturities_index = tf.searchsorted(unique_maturities,
tf.reshape(maturities, [-1]))
zcb_curve = tf.gather(zcb_curve, maturities_index, axis=-1)
# zcb_curve.shape = [num_grid_points] + [maturities_shape]
zcb_curve = tf.reshape(
zcb_curve, tf.concat([[num_grid_points], maturities_shape], axis=0))
# Shape after reduce_sum =
# (num_grid_points, batch_shape)
fixed_leg = tf.math.reduce_sum(
fixed_leg_coupon * fixed_leg_daycount_fractions * zcb_curve, axis=-1)
float_leg = 1.0 - zcb_curve[..., -1]
payoff_swap = float_leg - fixed_leg
payoff_swap = tf.where(is_payer_swaption, payoff_swap, -payoff_swap)
return tf.reshape(
tf.transpose(payoff_swap),
tf.concat([batch_shape, meshgrid_shape[1:]], axis=0))
def _get_index(t, tensor_to_search):
t = tf.expand_dims(t, axis=-1)
index = tf.searchsorted(tensor_to_search, t - _PDE_TIME_GRID_TOL, 'right')
y = tf.gather(tensor_to_search, index)
return tf.where(tf.math.abs(t - y) < _PDE_TIME_GRID_TOL, index, -1)[0]
sum_x_meshgrid = tf.math.reduce_sum(x_meshgrid, axis=0)
def _is_exercise_time(t):
return tf.reduce_any(
tf.where(
tf.math.abs(exercise_times[..., -1] - t) < _PDE_TIME_GRID_TOL,
True, False),
axis=-1)
def _discounting_fn(t, grid):
del grid
f_0_t = (model._instant_forward_rate_fn(t)) # pylint: disable=protected-access
return sum_x_meshgrid + f_0_t
def _final_value():
t = pde_time_grid[-1]
payoff_swap = tf.nn.relu(_get_swap_payoff(t))
is_ex_time = _is_exercise_time(t)
return tf.where(
tf.reshape(is_ex_time, tf.concat([batch_shape, [1] * dim], axis=0)),
payoff_swap, 0.0)
def _values_transform_fn(t, grid, value_grid):
zero = tf.zeros_like(value_grid)
is_ex_time = _is_exercise_time(t)
def _at_least_one_swaption_pays():
payoff_swap = tf.nn.relu(_get_swap_payoff(t))
return tf.where(
tf.reshape(is_ex_time, tf.concat([batch_shape, [1] * dim], axis=0)),
payoff_swap, zero)
v_star = tf.cond(
tf.math.reduce_any(is_ex_time), _at_least_one_swaption_pays,
lambda: zero)
return grid, tf.maximum(value_grid, v_star)
def _pde_time_step(t):
index = _get_index(t, pde_time_grid)
return pde_time_grid_dt[index]
# Use default boundary conditions, d^2V/dx_i^2 = 0
boundary_conditions = [(None, None) for i in range(dim)]
# res[0] contains the swaption prices.
# res[0].shape = batch_shape + [num_grid_points] * dim
res = model.fd_solver_backward(
pde_time_grid[-1],
0.0,
grid,
values_grid=_final_value(),
time_step=_pde_time_step,
boundary_conditions=boundary_conditions,
values_transform_fn=_values_transform_fn,
discounting=_discounting_fn,
dtype=dtype)
idx = tf.searchsorted(
tf.convert_to_tensor(grid),
tf.expand_dims(tf.convert_to_tensor([0.0] * dim, dtype=dtype), axis=-1))
# idx.shape = (dim, 1)
idx = tf.squeeze(idx) if dim > 1 else tf.reshape(idx, shape=[1])
slices = [slice(None)] + [slice(i, i + 1) for i in tf.unstack(idx)]
# shape = batch_shape + [1] * dim
option_value = res[0][slices]
# shape = batch_shape
option_value = tf.squeeze(option_value, axis=list(range(-dim, 0)))
# output_shape = batch_shape
return notional * tf.reshape(option_value, batch_shape)
def _coord_grid_to_mesh_grid(coord_grid):
if len(coord_grid) == 1:
return tf.expand_dims(coord_grid[0], 0)
x_meshgrid = tf.stack(values=tf.meshgrid(*coord_grid, indexing='ij'), axis=-1)
perm = [len(coord_grid)] + list(range(len(coord_grid)))
return tf.transpose(x_meshgrid, perm=perm)
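# Shape sketch (illustrative): for a two-factor model with coordinate grids of
# sizes [3] and [4], the stacked/transposed result above has shape (2, 3, 4),
# i.e. one full meshgrid per state dimension along the leading axis.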
def _create_pde_time_grid(exercise_times, time_step_fd, num_time_steps_fd,
dtype):
"""Create PDE time grid."""
with tf.name_scope('create_pde_time_grid'):
exercise_times, _ = tf.unique(tf.reshape(exercise_times, shape=[-1]))
if num_time_steps_fd is not None:
num_time_steps_fd = tf.convert_to_tensor(
num_time_steps_fd, dtype=tf.int32, name='num_time_steps_fd')
time_step_fd = tf.math.reduce_max(exercise_times) / tf.cast(
num_time_steps_fd, dtype=dtype)
if time_step_fd is None and num_time_steps_fd is None:
num_time_steps_fd = 100
pde_time_grid, _, _ = utils.prepare_grid(
times=exercise_times,
time_step=time_step_fd,
dtype=dtype,
num_time_steps=num_time_steps_fd)
pde_time_grid_dt = pde_time_grid[1:] - pde_time_grid[:-1]
pde_time_grid_dt = tf.concat([[100.0], pde_time_grid_dt], axis=-1)
return pde_time_grid, pde_time_grid_dt
def _create_term_structure_maturities(fixed_leg_payment_times):
"""Create maturities needed for termstructure simulations."""
with tf.name_scope('create_termstructure_maturities'):
maturities = fixed_leg_payment_times
maturities_shape = tf.shape(maturities)
# We should eventually remove tf.unique, but keeping it for now because
# PDE solvers are not xla compatible in TFF currently.
unique_maturities, _ = tf.unique(tf.reshape(maturities, shape=[-1]))
unique_maturities = tf.sort(unique_maturities, name='sort_maturities')
return maturities, unique_maturities, maturities_shape
| 45.867572 | 85 | 0.700733 |
7946dc203416f2713d1e67e1424d11f3493629b1 | 35 | py | Python | rhsa_mapper/__init__.py | programmerchad/rhsa_mapper | 94fedb215697bcbfea5a262bdba471a4de5dce07 | [
"MIT"
] | null | null | null | rhsa_mapper/__init__.py | programmerchad/rhsa_mapper | 94fedb215697bcbfea5a262bdba471a4de5dce07 | [
"MIT"
] | null | null | null | rhsa_mapper/__init__.py | programmerchad/rhsa_mapper | 94fedb215697bcbfea5a262bdba471a4de5dce07 | [
"MIT"
] | null | null | null | from rhsa_mapper.main import rhsam
| 17.5 | 34 | 0.857143 |
7946dc89cbc40d038684841b6e14631bc54aff7a | 5,545 | py | Python | backends/ebpf/targets/ebpfstf.py | anasyrmia/p4c-1 | 2bf2f615fdaaf4efed1f2f8ab0b3f3261cface60 | [
"Apache-2.0"
] | 487 | 2016-12-22T03:33:27.000Z | 2022-03-29T06:36:45.000Z | backends/ebpf/targets/ebpfstf.py | anasyrmia/p4c-1 | 2bf2f615fdaaf4efed1f2f8ab0b3f3261cface60 | [
"Apache-2.0"
] | 2,114 | 2016-12-18T11:36:27.000Z | 2022-03-31T22:33:23.000Z | backends/ebpf/targets/ebpfstf.py | anasyrmia/p4c-1 | 2bf2f615fdaaf4efed1f2f8ab0b3f3261cface60 | [
"Apache-2.0"
] | 456 | 2016-12-20T14:01:11.000Z | 2022-03-30T19:26:05.000Z | #!/usr/bin/env python3
# Copyright 2018 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Converts the commands in an stf file which populate tables into a C
program that manipulates ebpf tables. """
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + '/../../tools')
from testutils import *
from stf.stf_parser import STFParser
class eBPFCommand(object):
""" Defines a match-action command for eBPF programs"""
def __init__(self, a_type, table, action, priority="", match=[], extra=""):
self.a_type = a_type      # command type ("add" or "setdefault")
self.table = table        # name of the table to program
self.action = action      # action name and its parameter values
self.priority = priority  # entry priority (only used by "add")
self.match = match        # list of (field, value) match keys
self.extra = extra        # any trailing arguments from the stf entry
def _generate_control_actions(cmds):
""" Generates the actual control plane commands.
This function inserts C code for all the "add" commands that have
been parsed. """
generated = ""
for index, cmd in enumerate(cmds):
key_name = "key_%s%d" % (cmd.table, index)
value_name = "value_%s%d" % (cmd.table, index)
if cmd.a_type == "setdefault":
tbl_name = cmd.table + "_defaultAction"
generated += "u32 %s = 0;\n\t" % (key_name)
else:
generated += "struct %s_key %s = {};\n\t" % (cmd.table, key_name)
tbl_name = cmd.table
for key_num, key_field in enumerate(cmd.match):
field = key_field[0].split('.')[1]
generated += ("%s.%s = %s;\n\t"
% (key_name, field, key_field[1]))
generated += ("tableFileDescriptor = "
"BPF_OBJ_GET(MAP_PATH \"/%s\");\n\t" %
tbl_name)
generated += ("if (tableFileDescriptor < 0) {"
"fprintf(stderr, \"map %s not loaded\");"
" exit(1); }\n\t" % tbl_name)
generated += ("struct %s_value %s = {\n\t\t" % (
cmd.table, value_name))
generated += ".action = %s,\n\t\t" % (cmd.action[0])
generated += ".u = {.%s = {" % cmd.action[0]
for val_num, val_field in enumerate(cmd.action[1]):
generated += "%s," % val_field[1]
generated += "}},\n\t"
generated += "};\n\t"
generated += ("ok = BPF_USER_MAP_UPDATE_ELEM"
"(tableFileDescriptor, &%s, &%s, BPF_ANY);\n\t"
% (key_name, value_name))
generated += ("if (ok != 0) { perror(\"Could not write in %s\");"
"exit(1); }\n" % tbl_name)
return generated
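# For one parsed "add" command the string assembled above expands to C source
# along these lines (identifiers here are hypothetical and depend on the .stf
# file being converted):
#   struct my_table_key key_my_table0 = {};
#   key_my_table0.dstAddr = 0x0a000001;
#   tableFileDescriptor = BPF_OBJ_GET(MAP_PATH "/my_table");
#   if (tableFileDescriptor < 0) {fprintf(stderr, "map my_table not loaded"); exit(1); }
#   struct my_table_value value_my_table0 = {
#       .action = forward,
#       .u = {.forward = {1,}},
#   };
#   ok = BPF_USER_MAP_UPDATE_ELEM(tableFileDescriptor, &key_my_table0, &value_my_table0, BPF_ANY);
#   if (ok != 0) { perror("Could not write in my_table");exit(1); }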
def create_table_file(actions, tmpdir, file_name):
""" Create the control plane file.
The control commands are provided by the stf parser.
This generated file is required by ebpf_runtime.c to initialize
the control plane. """
err = ""
try:
with open(tmpdir + "/" + file_name, "w+") as control_file:
control_file.write("#include \"test.h\"\n\n")
control_file.write("static inline void setup_control_plane() {")
control_file.write("\n\t")
control_file.write("int ok;\n\t")
control_file.write("int tableFileDescriptor;\n\t")
generated_cmds = _generate_control_actions(actions)
control_file.write(generated_cmds)
control_file.write("}\n")
except OSError as e:
err = e
return FAILURE, err
return SUCCESS, err
def parse_stf_file(raw_stf):
""" Uses the .stf parsing tool to acquire a pre-formatted list.
Processing entries according to their specified cmd. """
parser = STFParser()
stf_str = raw_stf.read()
stf_map, errs = parser.parse(stf_str)
input_pkts = {}
cmds = []
expected = {}
for stf_entry in stf_map:
if stf_entry[0] == "packet":
input_pkts.setdefault(stf_entry[1], []).append(bytes.fromhex(''.join(stf_entry[2].split())))
elif stf_entry[0] == "expect":
interface = int(stf_entry[1])
pkt_data = stf_entry[2]
expected.setdefault(interface, {})
if pkt_data != '':
expected[interface]["any"] = False
expected[interface].setdefault(
"pkts", []).append(pkt_data)
else:
expected[interface]["any"] = True
elif stf_entry[0] == "add":
cmd = eBPFCommand(
a_type=stf_entry[0], table=stf_entry[1],
priority=stf_entry[2], match=stf_entry[3],
action=stf_entry[4], extra=stf_entry[5])
cmds.append(cmd)
elif stf_entry[0] == "setdefault":
cmd = eBPFCommand(
a_type=stf_entry[0], table=stf_entry[1], action=stf_entry[2])
cmds.append(cmd)
return input_pkts, cmds, expected
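# Typical call sequence (file names are hypothetical): parse an stf file and
# emit the control-plane header consumed by ebpf_runtime.c.
#   with open("test1.stf") as raw_stf:
#       input_pkts, cmds, expected = parse_stf_file(raw_stf)
#   create_table_file(cmds, tmpdir, "control.h")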
| 41.380597 | 104 | 0.581785 |
7946dd5489f1c2f18f785031a6574bb3a67cfa97 | 5,703 | py | Python | bindings/python/ensmallen_graph/datasets/networkrepository/sw10040d2trial3.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/networkrepository/sw10040d2trial3.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/networkrepository/sw10040d2trial3.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph SW-100-4-0d2-trial3.
The graph is automatically retrieved from the NetworkRepository repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-06 09:31:06.488016
The undirected graph SW-100-4-0d2-trial3 has 100 nodes and 200 unweighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.04040 and is connected, as it has a single component. The graph median
node degree is 4, the mean node degree is 4.00, and the node degree mode
is 4. The top 5 most central nodes are 83 (degree 6), 62 (degree 6), 12
(degree 6), 94 (degree 5) and 96 (degree 5).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import Sw10040d2Trial3
# Then load the graph
graph = Sw10040d2Trial3()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def Sw10040d2Trial3(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/networkrepository",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the SW-100-4-0d2-trial3 graph.
The graph is automatically retrieved from the NetworkRepository repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of SW-100-4-0d2-trial3 graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-06 09:31:06.488016
The undirected graph SW-100-4-0d2-trial3 has 100 nodes and 200 unweighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.04040 and is connected, as it has a single component. The graph median
node degree is 4, the mean node degree is 4.00, and the node degree mode
is 4. The top 5 most central nodes are 83 (degree 6), 62 (degree 6), 12
(degree 6), 94 (degree 5) and 96 (degree 5).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import Sw10040d2Trial3
# Then load the graph
graph = Sw10040d2Trial3()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="Sw10040d2Trial3",
dataset="networkrepository",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 31.860335 | 94 | 0.682799 |
7946dedb29967a5ff96a8d7cd312b2fd2bc51b15 | 6,859 | py | Python | notebooks/02_crash_severity.py | jennan/crash_prediction | 498b59704ed2aca61c78e4eb7c5558abe9edaffc | [
"MIT"
] | 3 | 2020-12-07T04:07:04.000Z | 2021-08-19T10:41:08.000Z | notebooks/02_crash_severity.py | jennan/crash_prediction | 498b59704ed2aca61c78e4eb7c5558abe9edaffc | [
"MIT"
] | 2 | 2020-12-10T19:12:02.000Z | 2020-12-10T19:12:08.000Z | notebooks/02_crash_severity.py | jennan/crash_prediction | 498b59704ed2aca61c78e4eb7c5558abe9edaffc | [
"MIT"
] | 2 | 2021-04-14T14:32:39.000Z | 2021-12-10T10:36:59.000Z | # # Exploration of the crash severity information in CAS data
#
# In this notebook, we will explore the severity of crashes, as it will be the
# target of our predictive models.
from pathlib import Path
import numpy as np
import pandas as pd
import scipy.stats as st
import matplotlib.pyplot as plt
import seaborn as sb
from crash_prediction import cas_data
# set seaborn default style
sb.set()
# But first, we ensure we have the data or download it if needed
dset_path = Path("..") / "data" / "cas_dataset.csv"
if not dset_path.exists():
dset_path.parent.mkdir(parents=True, exist_ok=True)
cas_data.download(dset_path)
# and load it.
dset = pd.read_csv(dset_path)
dset.head()
# The CAS dataset has 4 features that can be associated with the crash severity:
#
# - `crashSeverity`, severity of a crash, determined by the worst injury
# sustained in the crash at time of entry,
# - `fatalCount`, count of the number of fatal casualties associated with this
# crash,
# - `minorInjuryCount`, count of the number of minor injuries associated with
# this crash,
# - `seriousInjuryCount`, count of the number of serious injuries associated
# with this crash.
severity_features = [
"fatalCount",
"seriousInjuryCount",
"minorInjuryCount",
"crashSeverity",
]
fig, axes = plt.subplots(2, 2, figsize=(15, 12))
for ax, feat in zip(axes.flat, severity_features):
counts = dset[feat].value_counts(dropna=False)
counts.plot.bar(ylabel="# crashes", title=feat, ax=ax)
ax.set(yscale="log")
fig.tight_layout()
# To check the geographical distribution, we will focus on Auckland and replace
# discrete levels of `crashSeverity` with numbers to ease plotting.
dset_auckland = dset[dset["X"].between(174.7, 174.9) & dset["Y"].between(-37, -36.8)]
mapping = {
"Non-Injury Crash": 1,
"Minor Crash": 2,
"Serious Crash": 3,
"Fatal Crash": 4,
}
dset_auckland = dset_auckland.replace({"crashSeverity": mapping})
# Given the data set imbalance, we plot the local maxima to better see the
# location of more severe car crashes.
fig, axes = plt.subplots(2, 2, figsize=(15, 15))
for ax, feat in zip(axes.flat, severity_features):
dset_auckland.plot.hexbin(
"X",
"Y",
feat,
gridsize=500,
reduce_C_function=np.max,
cmap="BuPu",
title=feat,
ax=ax,
sharex=False,
)
ax.set_xticklabels([])
ax.set_yticklabels([])
fig.tight_layout()
# Few remarks coming from these plots:
#
# - fatal counts are (hopefully) very low,
# - crashes with serious injuries are also very sparse,
# - crashes with minor injuries are denser and seem to follow major axes,
# - the crash severity feature looks like the most homogeneous feature, yet
# highlighting some roads more than others.
#
# The crash severity is probably a good go-to target, as it's quite
# interpretable and actionable. The corresponding ML problem is a supervised
# multi-class prediction problem.
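# As a rough, illustrative sketch (not part of the original analysis), the
# multi-class target could be encoded from the severity labels with the
# `mapping` dictionary defined above:
y_severity = dset["crashSeverity"].map(mapping)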
# To simplify the problem, we can also just try to predict if a crash is going
# to involve an injury (minor, severe or fatal) or none. Here is how it would
# look in Auckland:
dset_auckland["injuryCrash"] = (dset_auckland["crashSeverity"] > 1) * 1.0
dset_auckland.plot.hexbin(
"X",
"Y",
"injuryCrash",
gridsize=500,
cmap="BuPu",
title="Crash with injury",
sharex=False,
figsize=(10, 10),
)
# Interestingly, the major axes do not pop up as saliently here, as we are
# averaging instead of taking the local maxima.
# This brings us to another question: is the fraction of crashes with
# injuries a constant fraction of the number of crashes in an area? This would
# imply that a simple binomial model can describe locally binned data.
# We first discretize space into 0.01° wide cells and count the total number of
# crashes in each cell as well as the number of crashes with injuries.
# +
dset["X_bin"] = pd.cut(
dset["X"], pd.interval_range(dset.X.min(), dset.X.max(), freq=0.01)
)
dset["Y_bin"] = pd.cut(
dset["Y"], pd.interval_range(dset.Y.min(), dset.Y.max(), freq=0.01)
)
counts = (
dset.groupby(["X_bin", "Y_bin"], observed=True).size().reset_index(name="crash")
)
injury_counts = (
dset.groupby(["X_bin", "Y_bin"], observed=True)
.apply(lambda x: (x["crashSeverity"] != "Non-Injury Crash").sum())
.reset_index(name="injury")
)
counts = counts.merge(injury_counts)
# -
# For each number of crashes per cell, we can check the fraction of crashes
# with injuries. Here we see that cells with only one or a few crashes have a
# nearly 50/50 chance of involving an injury, whereas in cells with a larger
# number of crashes the fraction drops to about 20%.
injury_fraction = counts.groupby("crash").apply(
lambda x: x["injury"].sum() / x["crash"].sum()
)
ax = injury_fraction.plot(style=".", ylabel="fraction of injuries", figsize=(10, 7))
ax.set_xscale("log")
# We can then check how well a binomial distribution models the binned data,
# using it to derive a 95% predictive interval.
ratio = counts["injury"].sum() / counts["crash"].sum()
xs = np.arange(1, counts["crash"].max() + 1)
pred_intervals = st.binom(xs, ratio).ppf([[0.025], [0.975]])
# +
fig, axes = plt.subplots(1, 2, figsize=(15, 7))
counts.plot.scatter(x="crash", y="injury", alpha=0.3, c="b", s=2, ax=axes[0])
axes[0].fill_between(
xs,
pred_intervals[0],
pred_intervals[1],
alpha=0.3,
color="r",
label="95% equal-tail interval for binomial",
)
axes[0].legend()
counts.plot.scatter(x="crash", y="injury", alpha=0.3, c="b", s=2, ax=axes[1])
axes[1].fill_between(
xs,
pred_intervals[0],
pred_intervals[1],
alpha=0.3,
color="r",
label="95% equal-tail interval for binomial",
)
axes[1].legend()
axes[1].set_xscale("log")
axes[1].set_yscale("log")
# -
# The predictive interval seems to have poor coverage, overshooting in the
# high-count regions and being too narrow for the regions with hundreds of
# crashes. We can compute the empirical coverage of these intervals to check this.
counts["covered"] = counts["injury"].between(
pred_intervals[0, counts["crash"] - 1], pred_intervals[1, counts["crash"] - 1]
)
print(f"95% predictive interval has {counts['covered'].mean() * 100:.2f}%.")
print("95% predictive interval coverage per quartile of crash counts:")
mask = counts["crash"] > 1
counts[mask].groupby(pd.qcut(counts.loc[mask, "crash"], 4))["covered"].mean()
# So it turns out that, on a macro scale, the coverage of this simple model is
# quite good, but if we split by number of crashes, the coverage degrades for
# the cells with higher numbers of crashes.
#
# Hence, including the number of crashes in a vicinity could be a relevant
# predictor for the probability of a crash with injury.
# ---
# ## Original computing environment
# !date -R
# !uname -a
# !pip freeze
| 30.896396 | 85 | 0.697478 |
7946e12304748268eca61f0a6ae06157e1488ec4 | 2,394 | py | Python | wagtail/contrib/wagtailstyleguide/views.py | hanpama/wagtail | e0e3cdc824b1acd9f9daa6d80b5455c969b385dd | [
"BSD-3-Clause"
] | null | null | null | wagtail/contrib/wagtailstyleguide/views.py | hanpama/wagtail | e0e3cdc824b1acd9f9daa6d80b5455c969b385dd | [
"BSD-3-Clause"
] | null | null | null | wagtail/contrib/wagtailstyleguide/views.py | hanpama/wagtail | e0e3cdc824b1acd9f9daa6d80b5455c969b385dd | [
"BSD-3-Clause"
] | 1 | 2019-03-05T15:37:22.000Z | 2019-03-05T15:37:22.000Z | from django import forms
from django.shortcuts import render
from django.utils.translation import ugettext as _
from django.core.paginator import Paginator
from wagtail.wagtailadmin import messages
from wagtail.wagtailadmin.forms import SearchForm
from wagtail.wagtailadmin.widgets import AdminPageChooser, AdminDateInput, AdminTimeInput, AdminDateTimeInput
from wagtail.wagtailimages.widgets import AdminImageChooser
from wagtail.wagtaildocs.widgets import AdminDocumentChooser
class ExampleForm(forms.Form):
def __init__(self, *args, **kwargs):
super(ExampleForm, self).__init__(*args, **kwargs)
self.fields['page_chooser'].widget = AdminPageChooser()
self.fields['image_chooser'].widget = AdminImageChooser()
self.fields['document_chooser'].widget = AdminDocumentChooser()
self.fields['date'].widget = AdminDateInput()
self.fields['time'].widget = AdminTimeInput()
self.fields['datetime'].widget = AdminDateTimeInput()
CHOICES = (
('choice1', 'choice 1'),
('choice2', 'choice 2'),
)
text = forms.CharField(required=True, help_text="help text")
url = forms.URLField(required=True)
email = forms.EmailField(max_length=254)
date = forms.DateField()
time = forms.TimeField()
datetime = forms.DateTimeField()
select = forms.ChoiceField(choices=CHOICES)
boolean = forms.BooleanField(required=False)
page_chooser = forms.BooleanField(required=True)
image_chooser = forms.BooleanField(required=True)
document_chooser = forms.BooleanField(required=True)
def index(request):
form = SearchForm(placeholder=_("Search something"))
example_form = ExampleForm()
messages.success(request, _("Success message"), buttons=[
messages.button('', _('View live')),
messages.button('', _('Edit'))
])
messages.warning(request, _("Warning message"), buttons=[
messages.button('', _('View live')),
messages.button('', _('Edit'))
])
messages.error(request, _("Error message"), buttons=[
messages.button('', _('View live')),
messages.button('', _('Edit'))
])
paginator = Paginator(list(range(100)), 10)
page = paginator.page(2)
return render(request, 'wagtailstyleguide/base.html', {
'search_form': form,
'example_form': example_form,
'example_page': page,
})
| 35.205882 | 109 | 0.688805 |
7946e1da26e31003cfd09c19b3079fe28995bc79 | 1,539 | py | Python | statement_parser/validators/total.py | jamiehannaford/statement-parser | 93925b5903a4570f66f3e7b7d5d839412bde1da0 | [
"MIT"
] | 5 | 2021-09-01T03:27:02.000Z | 2022-03-31T16:31:23.000Z | statement_parser/validators/total.py | jamiehannaford/statement-parser | 93925b5903a4570f66f3e7b7d5d839412bde1da0 | [
"MIT"
] | null | null | null | statement_parser/validators/total.py | jamiehannaford/statement-parser | 93925b5903a4570f66f3e7b7d5d839412bde1da0 | [
"MIT"
] | null | null | null | import math
from xbrl.instance import NumericFact
class TotalValidator:
def __init__(self, instance):
self.instance = instance
def isclose(self, a, b, sf=None):
if sf:
return math.isclose(a, b, abs_tol=10**-sf)
else:
return math.isclose(a, b)
def check_list_sums_to_n(self, elems, n):
elem_sum = sum(elem.value for elem in elems)
return self.isclose(n.value, elem_sum, n.decimals)
def process(self, elements):
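        # Group facts by concept and drop duplicate values; then, per concept,
        # if the largest (or smallest) numeric fact equals the sum of the
        # others, keep only that total fact, otherwise keep every fact.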
concept_map = {}
concept_val = {}
for elem in elements:
concept_id = elem.concept.xml_id
if concept_id not in concept_map:
concept_map[concept_id] = []
concept_val[concept_id] = []
if elem.value in concept_val[concept_id]:
continue
concept_map[concept_id].append(elem)
concept_val[concept_id].append(elem.value)
output = []
for concept, elems in concept_map.items():
elems.sort(key=lambda elem: elem.value)
# check highest
if isinstance(elems[-1], NumericFact) and self.check_list_sums_to_n(elems[:-1], elems[-1]):
output.append(elems[-1])
continue
# check lowest
if isinstance(elems[0], NumericFact) and self.check_list_sums_to_n(elems[1:], elems[0]):
output.append(elems[0])
continue
output.extend(elems)
return output | 30.78 | 103 | 0.564003 |
7946e3e6bb1b2c2257bc3ddbd58b9908807462ce | 1,501 | py | Python | tests/spot/mining/test_mining_worker.py | fossabot/binance-connector-python | bab18df22ba57b407b15dd0a9147cd75e6389b9e | [
"MIT"
] | 1 | 2021-08-05T03:36:24.000Z | 2021-08-05T03:36:24.000Z | tests/spot/mining/test_mining_worker.py | fossabot/binance-connector-python | bab18df22ba57b407b15dd0a9147cd75e6389b9e | [
"MIT"
] | 2 | 2021-07-12T11:18:55.000Z | 2021-07-12T11:28:19.000Z | tests/spot/mining/test_mining_worker.py | fossabot/binance-connector-python | bab18df22ba57b407b15dd0a9147cd75e6389b9e | [
"MIT"
] | 1 | 2021-07-10T20:50:04.000Z | 2021-07-10T20:50:04.000Z | import responses
from urllib.parse import urlencode
from tests.util import random_str
from tests.util import mock_http_response
from binance.spot import Spot as Client
from binance.error import ParameterRequiredError, ClientError
mock_item = {"key_1": "value_1", "key_2": "value_2"}
key = random_str()
secret = random_str()
def test_mining_worker_without_algo():
"""Tests the API endpoint to get worker without algo"""
client = Client(key, secret)
client.mining_worker.when.called_with("", "test_name", "worker_name").should.throw(
ParameterRequiredError
)
def test_mining_worker_without_username():
"""Tests the API endpoint to get worker without username"""
client = Client(key, secret)
client.mining_worker.when.called_with("sha256", "", "worker_name").should.throw(
ParameterRequiredError
)
def test_mining_worker_without_workername():
"""Tests the API endpoint to get worker without workername"""
client = Client(key, secret)
client.mining_worker.when.called_with("sha256", "test_name", "").should.throw(
ParameterRequiredError
)
@mock_http_response(
responses.GET,
"/sapi/v1/mining/worker/detail\\?algo=sha256&userName=user_name&workerName=worker_name",
mock_item,
200,
)
def test_mining_worker():
"""Tests the API endpoint to get coin list"""
client = Client(key, secret)
response = client.mining_worker("sha256", "user_name", "worker_name")
response.should.equal(mock_item)
| 27.796296 | 92 | 0.726849 |
7946e5482b750680a3cad702affab9514421a401 | 2,936 | py | Python | paramak/parametric_components/center_column_circular.py | billingsley-john/paramak | 127d064f7bc0fd26305b4d83776d66b0e12aeeb0 | [
"MIT"
] | null | null | null | paramak/parametric_components/center_column_circular.py | billingsley-john/paramak | 127d064f7bc0fd26305b4d83776d66b0e12aeeb0 | [
"MIT"
] | null | null | null | paramak/parametric_components/center_column_circular.py | billingsley-john/paramak | 127d064f7bc0fd26305b4d83776d66b0e12aeeb0 | [
"MIT"
] | null | null | null |
from typing import Optional, Tuple
from paramak import RotateMixedShape
class CenterColumnShieldCircular(RotateMixedShape):
"""A center column shield volume with a circular outer profile and constant
cylindrical inner profile.
Args:
height: height of the center column shield (cm).
inner_radius: the inner radius of the center column shield (cm).
        mid_radius: the inner radius of the outer circular profile of the center
            column shield (cm).
        outer_radius: the outer radius of the center column shield (cm).
stp_filename: Defaults to "CenterColumnShieldCircular.stp".
stl_filename: Defaults to "CenterColumnShieldCircular.stl".
name: Defaults to "center_column_shield".
material_tag: Defaults to "center_column_shield_mat".
"""
def __init__(
self,
height: float,
inner_radius: float,
mid_radius: float,
outer_radius: float,
stp_filename: Optional[str] = "CenterColumnShieldCircular.stp",
stl_filename: Optional[str] = "CenterColumnShieldCircular.stl",
material_tag: Optional[str] = "center_column_shield_mat",
color: Optional[Tuple[float, float, float,
Optional[float]]] = (0., 0.333, 0.),
**kwargs
) -> None:
super().__init__(
material_tag=material_tag,
stp_filename=stp_filename,
stl_filename=stl_filename,
color=color,
**kwargs
)
self.height = height
self.inner_radius = inner_radius
self.mid_radius = mid_radius
self.outer_radius = outer_radius
@property
def height(self):
return self._height
@height.setter
def height(self, height):
self._height = height
@property
def inner_radius(self):
return self._inner_radius
@inner_radius.setter
def inner_radius(self, inner_radius):
self._inner_radius = inner_radius
@property
def mid_radius(self):
return self._mid_radius
@mid_radius.setter
def mid_radius(self, mid_radius):
self._mid_radius = mid_radius
@property
def outer_radius(self):
return self._outer_radius
@outer_radius.setter
def outer_radius(self, outer_radius):
self._outer_radius = outer_radius
def find_points(self):
"""Finds the XZ points and connection types (straight and circle) that
describe the 2D profile of the center column shield shape."""
points = [
(self.inner_radius, 0, "straight"),
(self.inner_radius, self.height / 2, "straight"),
(self.outer_radius, self.height / 2, "circle"),
(self.mid_radius, 0, "circle"),
(self.outer_radius, -self.height / 2, "straight"),
(self.inner_radius, -self.height / 2, "straight")
]
self.points = points
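# A minimal usage sketch (the parameter values are illustrative, not taken from
# paramak's documentation; export_stp is assumed from the parent Shape API):
# shield = CenterColumnShieldCircular(
#     height=800, inner_radius=100, mid_radius=150, outer_radius=200
# )
# shield.export_stp("center_column_shield.stp")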
| 30.905263 | 82 | 0.633515 |
7946e605e141a38b13ce9411d114e04308c8a19f | 460 | py | Python | StringMe.py | akey7/supreme-enigma | a724c1cd26f6fc82d7c3f988a62d9bfefed6f750 | [
"MIT"
] | null | null | null | StringMe.py | akey7/supreme-enigma | a724c1cd26f6fc82d7c3f988a62d9bfefed6f750 | [
"MIT"
] | 6 | 2019-08-30T04:08:52.000Z | 2019-08-30T15:23:48.000Z | StringMe.py | akey7/supreme-enigma | a724c1cd26f6fc82d7c3f988a62d9bfefed6f750 | [
"MIT"
] | null | null | null | class StringMe:
def __init__(self, source):
self.source = source
def is_palindrome(self, test_me=None):
if test_me is None:
test_me = self.source
if len(test_me) < 2:
return True
elif test_me[0] != test_me[-1]:
return False
else:
return self.is_palindrome(test_me[1:-1])
def isBalanced(self):
pass
def reflect(self):
return self.source
| 21.904762 | 52 | 0.552174 |
7946e64f2ee649e2b91285c3654eb78ff026e7ce | 2,201 | py | Python | run.py | semihcanturk/PyTorch-VAE | ee881337f75cc47f54e164142073714eed43dd0a | [
"Apache-2.0"
] | null | null | null | run.py | semihcanturk/PyTorch-VAE | ee881337f75cc47f54e164142073714eed43dd0a | [
"Apache-2.0"
] | null | null | null | run.py | semihcanturk/PyTorch-VAE | ee881337f75cc47f54e164142073714eed43dd0a | [
"Apache-2.0"
] | null | null | null | import yaml
import argparse
import numpy as np
from models import *
from experiment import VAEXperiment
import torch.backends.cudnn as cudnn
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TestTubeLogger
from pytorch_lightning.callbacks import ModelCheckpoint
if __name__ == '__main__':
torch.manual_seed(0)
np.random.seed(0)
parser = argparse.ArgumentParser(description='Generic runner for VAE models')
parser.add_argument('--config', '-c',
dest="filename",
metavar='FILE',
help='path to the config file',
default='./configs/cvae.yaml')
args = parser.parse_args()
with open(args.filename, 'r') as file:
try:
config = yaml.safe_load(file)
except yaml.YAMLError as exc:
print(exc)
tt_logger = TestTubeLogger(
save_dir=config['logging_params']['save_dir'],
name=config['logging_params']['name'],
debug=False,
create_git_tag=False,
)
# For reproducibility
torch.manual_seed(config['logging_params']['manual_seed'])
np.random.seed(config['logging_params']['manual_seed'])
cudnn.deterministic = True
cudnn.benchmark = False
model = vae_models[config['model_params']['name']](**config['model_params'])
experiment = VAEXperiment(model,
config['exp_params'])
checkpoint_callback = ModelCheckpoint(monitor='val_loss', filename=config['exp_params']['dataset'] + '-{epoch:02d}-{val_loss:.2f}', save_top_k=-1)
runner = Trainer(default_root_dir=f"{tt_logger.save_dir}",
checkpoint_callback=True,
min_epochs=1,
logger=tt_logger,
log_every_n_steps=100,
limit_train_batches=1.,
val_check_interval=1.,
num_sanity_val_steps=5,
callbacks=[checkpoint_callback],
**config['trainer_params'])
print(f"======= Training {config['model_params']['name']} =======")
print(config['exp_params']['LR'])
runner.fit(experiment)
| 34.390625 | 150 | 0.606542 |
7946e6ed355d68d1b718da4578a4d9ae9ff16584 | 458 | py | Python | models/dummynet.py | LendelTheGreat/weak-segmentation | 0ff6015f1af741cfb50ef8fb6f55cea822f68f7a | [
"MIT"
] | 1 | 2020-11-04T03:00:44.000Z | 2020-11-04T03:00:44.000Z | models/dummynet.py | LendelTheGreat/weak-segmentation | 0ff6015f1af741cfb50ef8fb6f55cea822f68f7a | [
"MIT"
] | null | null | null | models/dummynet.py | LendelTheGreat/weak-segmentation | 0ff6015f1af741cfb50ef8fb6f55cea822f68f7a | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
class DummyNet(nn.Module):
def __init__(self):
super(DummyNet, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5, padding=2)
self.conv2 = nn.Conv2d(10, 5, kernel_size=5, padding=2)
self.softmax = nn.Softmax2d()
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.conv2(x)
x = self.softmax(x)
return x
| 26.941176 | 63 | 0.60917 |
7946e9e1061e7f3c202f7300dd0bf44dc923b516 | 520 | py | Python | day2/netmikox/collateral/netmiko_example1.py | austind/pyplus-ons | f0fcd6b2a980f75968ab54cd2ae39b42c1f68302 | [
"Apache-2.0"
] | null | null | null | day2/netmikox/collateral/netmiko_example1.py | austind/pyplus-ons | f0fcd6b2a980f75968ab54cd2ae39b42c1f68302 | [
"Apache-2.0"
] | null | null | null | day2/netmikox/collateral/netmiko_example1.py | austind/pyplus-ons | f0fcd6b2a980f75968ab54cd2ae39b42c1f68302 | [
"Apache-2.0"
] | 5 | 2019-11-19T18:41:41.000Z | 2020-06-18T14:58:09.000Z | #!/usr/bin/env python
from getpass import getpass
from netmiko import ConnectHandler
if __name__ == "__main__":
password = getpass("Enter password: ")
cisco3 = {
"device_type": "cisco_ios",
"host": "cisco3.lasthop.io",
"username": "pyclass",
"password": password,
"session_log": "my_session.txt",
}
net_connect = ConnectHandler(**cisco3)
print(net_connect.find_prompt())
print(net_connect.send_command("show ip int brief"))
net_connect.disconnect()
| 26 | 56 | 0.648077 |
7946eaa0e993062333c4d08e262c3bf3da8fac09 | 4,237 | py | Python | stubs.min/System/Windows/Forms/__init___parts/Cursor.py | ricardyn/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2021-02-02T13:39:16.000Z | 2021-02-02T13:39:16.000Z | stubs.min/System/Windows/Forms/__init___parts/Cursor.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/System/Windows/Forms/__init___parts/Cursor.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class Cursor(object,IDisposable,ISerializable):
"""
Represents the image used to paint the mouse pointer.
Cursor(handle: IntPtr)
Cursor(stream: Stream)
Cursor(fileName: str)
Cursor(type: Type,resource: str)
"""
def CopyHandle(self):
"""
CopyHandle(self: Cursor) -> IntPtr
Copies the handle of this System.Windows.Forms.Cursor.
Returns: An System.IntPtr that represents the cursor's handle.
"""
pass
def Dispose(self):
"""
Dispose(self: Cursor)
Releases all resources used by the System.Windows.Forms.Cursor.
"""
pass
def Draw(self,g,targetRect):
"""
Draw(self: Cursor,g: Graphics,targetRect: Rectangle)
Draws the cursor on the specified surface,within the specified bounds.
g: The System.Drawing.Graphics surface on which to draw the
System.Windows.Forms.Cursor.
targetRect: The System.Drawing.Rectangle that represents the bounds of the
System.Windows.Forms.Cursor.
"""
pass
def DrawStretched(self,g,targetRect):
"""
DrawStretched(self: Cursor,g: Graphics,targetRect: Rectangle)
Draws the cursor in a stretched format on the specified surface,within the
specified bounds.
g: The System.Drawing.Graphics surface on which to draw the
System.Windows.Forms.Cursor.
targetRect: The System.Drawing.Rectangle that represents the bounds of the
System.Windows.Forms.Cursor.
"""
pass
def Equals(self,obj):
"""
Equals(self: Cursor,obj: object) -> bool
Returns a value indicating whether this cursor is equal to the specified
System.Windows.Forms.Cursor.
obj: The System.Windows.Forms.Cursor to compare.
Returns: true if this cursor is equal to the specified System.Windows.Forms.Cursor;
otherwise,false.
"""
pass
def GetHashCode(self):
"""
GetHashCode(self: Cursor) -> int
Retrieves the hash code for the current System.Windows.Forms.Cursor.
Returns: A hash code for the current System.Windows.Forms.Cursor.
"""
pass
@staticmethod
def Hide():
"""
Hide()
Hides the cursor.
"""
pass
@staticmethod
def Show():
"""
Show()
Displays the cursor.
"""
pass
def ToString(self):
"""
ToString(self: Cursor) -> str
Retrieves a human readable string representing this System.Windows.Forms.Cursor.
Returns: A System.String that represents this System.Windows.Forms.Cursor.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==y """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,handle: IntPtr)
__new__(cls: type,fileName: str)
__new__(cls: type,type: Type,resource: str)
__new__(cls: type,stream: Stream)
"""
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
def __str__(self,*args):
pass
Handle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the handle of the cursor.
Get: Handle(self: Cursor) -> IntPtr
"""
HotSpot=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the cursor hot spot.
Get: HotSpot(self: Cursor) -> Point
"""
Size=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the size of the cursor object.
Get: Size(self: Cursor) -> Size
"""
Tag=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the object that contains data about the System.Windows.Forms.Cursor.
Get: Tag(self: Cursor) -> object
Set: Tag(self: Cursor)=value
"""
Clip=None
Current=None
Position=None
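# A brief usage sketch (illustrative only; assumes an IronPython/.NET environment
# where the real System.Windows.Forms assembly is loaded, not just this stub):
# from System.Windows.Forms import Cursor
# Cursor.Hide()   # hide the mouse pointer
# Cursor.Show()   # show it again
# print(Cursor.Position)  # current pointer location as a Point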
| 26.987261 | 215 | 0.661317 |
7946eb9845b5bd62949017b4a02a2372adad6700 | 1,599 | py | Python | Car.py | Researchnix/Taco | 4946fa996638f7fd6704d14ab029c208bf625707 | [
"MIT"
] | null | null | null | Car.py | Researchnix/Taco | 4946fa996638f7fd6704d14ab029c208bf625707 | [
"MIT"
] | null | null | null | Car.py | Researchnix/Taco | 4946fa996638f7fd6704d14ab029c208bf625707 | [
"MIT"
] | null | null | null | #
# Car.py
# Taco --- SPH Innovation Challenge
#
# Created by Mat, Kon and Len on 2017-03-11.
# Copyright 2016 Researchnix. All rights reserved.
#
import collections
class Car:
# ID of the car
ID = ""
# starting point of the car
start = 0
# departure and arrival time stamps
startTime = 0
endTime = 0
# total travel time
totalTime = 0
    # destination of the car
destination = 0
    # This should be a list of vertices that the car needs to visit to reach
    # its destination
InterRoute = []
# From the InterRoute one can simply obtain the StreetRoute,
    # by replacing the i-th and (i+1)-th entries with the unique street leading
# from i to i+1
StreetRoute = []
StreetQueue = collections.deque(StreetRoute)
# nextStreet = StreetRoute[0]
# record of actual roads visited
history = []
def __init__ (self, ID, start, destination, timestamp=0, route = None):
self.ID = ID
self.start = start
self.destination = destination
self.startTime = timestamp
self.history = list([])
if route is not None:
self.StreetRoute = route
self.StreetQueue = collections.deque(self.StreetRoute)
def dequeueNextSt(self):
try:
return self.StreetQueue.popleft()
except:
return None
def peekNextSt(self):
try:
return self.StreetQueue[0]
except:
return None
def arrive(self, timestamp):
self.endTime = timestamp
self.totalTime = self.endTime - self.startTime
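# A minimal usage sketch (the ID, vertices and street names are made up for
# illustration):
# car = Car("car-1", start=2, destination=7, timestamp=0, route=["A-St", "B-Ave"])
# nxt = car.peekNextSt()    # -> "A-St"
# car.dequeueNextSt()       # consume the street once it has been traversed
# car.arrive(timestamp=42)  # records endTime and totalTime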
| 23.173913 | 75 | 0.614134 |