the-stack_106_15612
import os
from pathlib import Path
from typing import Union
from fractions import Fraction
from PIL import Image
import skvideo.io
import torch
import torchvision.transforms as transforms
from .const import mean, std, img_formats
class Sequence:
def __init__(self):
normalize = transforms.Normalize(mean=mean, std=std)
self.transform = transforms.Compose([transforms.ToTensor(), normalize])
def __iter__(self):
return self
def __next__(self):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
class ImageSequence(Sequence):
def __init__(self, imgs_dirpath: str, fps: float):
super().__init__()
self.fps = fps
assert os.path.isdir(imgs_dirpath)
self.imgs_dirpath = imgs_dirpath
self.file_names = [f for f in os.listdir(imgs_dirpath) if self._is_img_file(f)]
assert self.file_names
self.file_names.sort()
@classmethod
def _is_img_file(cls, path: str):
return Path(path).suffix.lower() in img_formats
def __next__(self):
for idx in range(0, len(self.file_names) - 1):
file_paths = self._get_path_from_name([self.file_names[idx], self.file_names[idx + 1]])
imgs = list()
for file_path in file_paths:
img = self._pil_loader(file_path)
img = self.transform(img)
imgs.append(img)
times_sec = [idx/self.fps, (idx + 1)/self.fps]
yield imgs, times_sec
def __len__(self):
return len(self.file_names) - 1
@staticmethod
def _pil_loader(path):
with open(path, 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
w_orig, h_orig = img.size
w, h = w_orig//32*32, h_orig//32*32
left = (w_orig - w)//2
upper = (h_orig - h)//2
right = left + w
lower = upper + h
img = img.crop((left, upper, right, lower))
return img
def _get_path_from_name(self, file_names: Union[list, str]) -> Union[list, str]:
if isinstance(file_names, list):
return [os.path.join(self.imgs_dirpath, f) for f in file_names]
return os.path.join(self.imgs_dirpath, file_names)
class VideoSequence(Sequence):
def __init__(self, video_filepath: str, fps: float=None):
super().__init__()
metadata = skvideo.io.ffprobe(video_filepath)
self.fps = fps
if self.fps is None:
self.fps = float(Fraction(metadata['video']['@avg_frame_rate']))
assert self.fps > 0, 'Could not retrieve fps from video metadata. fps: {}'.format(self.fps)
print('Using video metadata: Got fps of {} frames/sec'.format(self.fps))
# Length is number of frames - 1 (because we return pairs).
self.len = int(metadata['video']['@nb_frames']) - 1
self.videogen = skvideo.io.vreader(video_filepath)
self.last_frame = None
def __next__(self):
for idx, frame in enumerate(self.videogen):
h_orig, w_orig, _ = frame.shape
w, h = w_orig//32*32, h_orig//32*32
left = (w_orig - w)//2
upper = (h_orig - h)//2
right = left + w
lower = upper + h
frame = frame[upper:lower, left:right]
assert frame.shape[:2] == (h, w)
frame = self.transform(frame)
if self.last_frame is None:
self.last_frame = frame
continue
last_frame_copy = self.last_frame.detach().clone()
self.last_frame = frame
imgs = [last_frame_copy, frame]
times_sec = [(idx - 1)/self.fps, idx/self.fps]
yield imgs, times_sec
def __len__(self):
return self.len
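# Minimal illustrative usage sketch; the directory path and fps below are hypothetical
# placeholders. As written above, __next__ is a generator function, so the consecutive
# frame pairs are obtained by iterating over the generator it returns rather than by
# calling next() on the sequence object itself.
def _demo_iterate_image_sequence(imgs_dirpath: str = "/path/to/frames", fps: float = 30.0):
    seq = ImageSequence(imgs_dirpath, fps)
    for (img_prev, img_next), (t_prev, t_next) in seq.__next__():
        # Each item is a pair of normalized CxHxW tensors (center-cropped to multiples
        # of 32) together with their timestamps in seconds.
        print(img_prev.shape, img_next.shape, t_prev, t_next)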
the-stack_106_15613
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import KnoxFSTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
'''
SendHeadersTest -- test behavior of headers messages to announce blocks.
Setup:
- Two nodes, two p2p connections to node0. One p2p connection should only ever
receive inv's (omitted from testing description below, this is our control).
Second node is used for creating reorgs.
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
'''
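# Minimal illustrative sketch of the block-building pattern that run_test() repeats
# below -- create a block on the current tip, solve its proof of work, then advance
# tip/height/block_time. The helper name is hypothetical and is not used by the test.
def build_block_chain(tip, height, block_time, count):
    blocks = []
    for _ in range(count):
        block = create_block(tip, create_coinbase(height), block_time)
        block.solve()
        blocks.append(block)
        tip = block.sha256
        block_time += 1
        height += 1
    return blocks, tip, height, block_time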
direct_fetch_response_time = 0.05
class BaseNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.last_inv = None
self.last_headers = None
self.last_block = None
self.last_getdata = None
self.block_announced = False
self.last_getheaders = None
self.disconnected = False
self.last_blockhash_announced = None
def clear_last_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_inv = None
self.last_headers = None
# Request data for a list of block hashes
def get_data(self, block_hashes):
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(2, x))
self.connection.send_message(msg)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.connection.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(2, blockhash)]
self.connection.send_message(msg)
def on_inv(self, conn, message):
self.last_inv = message
self.block_announced = True
self.last_blockhash_announced = message.inv[-1].hash
def on_headers(self, conn, message):
self.last_headers = message
if len(message.headers):
self.block_announced = True
message.headers[-1].calc_sha256()
self.last_blockhash_announced = message.headers[-1].sha256
def on_block(self, conn, message):
self.last_block = message.block
self.last_block.calc_sha256()
def on_getdata(self, conn, message):
self.last_getdata = message
def on_getheaders(self, conn, message):
self.last_getheaders = message
def on_close(self, conn):
self.disconnected = True
# Test whether the last announcement we received had the
# right header or the right inv
# inv and headers should be lists of block hashes
def check_last_announcement(self, headers=None, inv=None):
expect_headers = headers if headers != None else []
expect_inv = inv if inv != None else []
test_function = lambda: self.block_announced
assert(wait_until(test_function, timeout=60))
with mininode_lock:
self.block_announced = False
success = True
compare_inv = []
if self.last_inv != None:
compare_inv = [x.hash for x in self.last_inv.inv]
if compare_inv != expect_inv:
success = False
hash_headers = []
if self.last_headers != None:
# treat headers as a list of block hashes
hash_headers = [ x.sha256 for x in self.last_headers.headers ]
if hash_headers != expect_headers:
success = False
self.last_inv = None
self.last_headers = None
return success
# Syncing helpers
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_getheaders != None
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_getdata(self, hash_list, timeout=60):
if hash_list == []:
return
test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.disconnected
assert(wait_until(test_function, timeout=timeout))
return
def wait_for_block_announcement(self, block_hash, timeout=60):
test_function = lambda: self.last_blockhash_announced == block_hash
assert(wait_until(test_function, timeout=timeout))
return
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
self.send_message(headers_message)
def send_getblocks(self, locator):
getblocks_message = msg_getblocks()
getblocks_message.locator.vHave = locator
self.send_message(getblocks_message)
# InvNode: This peer should only ever receive inv's, because it doesn't ever send a
# "sendheaders" message.
class InvNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
# TestNode: This peer is the one we use for most of the testing.
class TestNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
class SendHeadersTest(KnoxFSTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
self.nodes = []
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [["-debug", "-logtimemicros=1"]]*2)
connect_nodes(self.nodes[0], 1)
# mine count blocks and return the new tip
def mine_blocks(self, count):
# Clear out last block announcement from each p2p listener
[ x.clear_last_announcement() for x in self.p2p_connections ]
self.nodes[0].generate(count)
return int(self.nodes[0].getbestblockhash(), 16)
# mine a reorg that invalidates length blocks (replacing them with
# length+1 blocks).
# Note: we clear the state of our p2p connections after the
# to-be-reorged-out blocks are mined, so that we don't break later tests.
# return the list of block hashes newly mined
def mine_reorg(self, length):
self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
self.sync_blocks(self.nodes, wait=0.1)
for x in self.p2p_connections:
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
x.clear_last_announcement()
tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
self.nodes[1].invalidateblock(hash_to_invalidate)
all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
self.sync_blocks(self.nodes, wait=0.1)
return [int(x, 16) for x in all_hashes]
def run_test(self):
# Setup the p2p connections and start up the network thread.
inv_node = InvNode()
test_node = TestNode()
self.p2p_connections = [inv_node, test_node]
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
# Set nServices to 0 for test_node, so no block download will occur outside of
# direct fetching
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
inv_node.add_connection(connections[0])
test_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
inv_node.wait_for_verack()
test_node.wait_for_verack()
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
# 1. Mine a block; expect inv announcements each time
print("Part 1: headers don't start before sendheaders message...")
for i in range(4):
old_tip = tip
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
test_node.get_data([tip])
test_node.wait_for_block(tip, timeout=5)
elif i == 1:
# next try requesting header and block
test_node.get_headers(locator=[old_tip], hashstop=tip)
test_node.get_data([tip])
test_node.wait_for_block(tip)
test_node.clear_last_announcement() # since we requested headers...
elif i == 2:
# this time announce own block via headers
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
new_block = create_block(tip, create_coinbase(height+1), block_time)
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256], timeout=5)
test_node.send_message(msg_block(new_block))
test_node.sync_with_ping() # make sure this block is processed
inv_node.clear_last_announcement()
test_node.clear_last_announcement()
print("Part 1: success!")
print("Part 2: announce blocks with headers after sendheaders message...")
# PART 2
# 2. Send a sendheaders message and test that headers announcements
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
test_node.get_headers(locator=[prev_tip], hashstop=0)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height = self.nodes[0].getblockcount()+1
block_time += 10 # Advance far enough ahead
for i in range(10):
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in range(2):
blocks = []
for b in range(i+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
test_node.wait_for_getdata([tip], timeout=5)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
inv_node.send_block_inv(tip)
# Should have received a getheaders as well!
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks[0:-1]], timeout=5)
[ inv_node.send_block_inv(x.sha256) for x in blocks[0:-1] ]
inv_node.sync_with_ping()
else:
# Announce via headers
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
# Test that duplicate headers won't result in duplicate
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
# broadcast it)
assert_equal(inv_node.last_inv, None)
assert_equal(inv_node.last_headers, None)
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height += 1
block_time += 1
print("Part 2: success!")
print("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in range(2):
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)
block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
block_time += 9
fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"]
fork_point = int(fork_point, 16)
# Use getblocks/getdata
test_node.send_getblocks(locator = [fork_point])
assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True)
test_node.get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
test_node.get_data([tip])
test_node.wait_for_block(tip)
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
if j == 0:
test_node.get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
print("Part 3: success!")
print("Part 4: Testing direct fetch behavior...")
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for b in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))
inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# This time, direct fetch should work
blocks = []
for b in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time)
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
# Now announce a header that forks the last two blocks
tip = blocks[0].sha256
height -= 1
blocks = []
# Create extra blocks for later
for b in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Announcing one block on fork should not trigger direct fetch
# (less work than tip)
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# Announcing one more block on fork should trigger direct fetch for
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time)
# Announcing 1 more header should not trigger any response
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
print("Part 4: success!")
# Now deliver all those blocks we announced.
[ test_node.send_message(msg_block(x)) for x in blocks ]
print("Part 5: Testing handling of unconnecting headers")
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
for i in range(10):
test_node.last_getdata = None
blocks = []
# Create two more blocks.
for j in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Send the header of the second block -> this won't connect.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[1]])
test_node.wait_for_getheaders(timeout=1)
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
blocks = []
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
for j in range(MAX_UNCONNECTING_HEADERS+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
for i in range(1, MAX_UNCONNECTING_HEADERS):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[i]])
test_node.wait_for_getheaders(timeout=1)
# Next header will connect, should re-set our count:
test_node.send_header_for_blocks([blocks[0]])
# Remove the first two entries (blocks[1] would connect):
blocks = blocks[2:]
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5*MAX_UNCONNECTING_HEADERS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[i%len(blocks)]])
test_node.wait_for_getheaders(timeout=1)
# Eventually this stops working.
with mininode_lock:
            test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[-1]])
# Should get disconnected
test_node.wait_for_disconnect()
with mininode_lock:
            test_node.last_getheaders = True
print("Part 5: success!")
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert_equal(inv_node.last_getdata, None)
if __name__ == '__main__':
SendHeadersTest().main()
the-stack_106_15614
import pandas as pd
import sklearn as sk
import numpy as np
from os import path
from vowpalwabbit import pyvw
import pytest
def helper_get_test_dir():
curr_path = path.dirname(path.realpath(__file__))
return path.join(path.dirname(path.dirname(curr_path)), "test")
def helper_get_data():
train_data = [{'action': 1, 'cost': 2, 'probability': 0.4, 'feature1': 'a', 'feature2': 'c', 'feature3': ''},
{'action': 3, 'cost': 0, 'probability': 0.2, 'feature1': 'b', 'feature2': 'd', 'feature3': ''},
{'action': 4, 'cost': 1, 'probability': 0.5, 'feature1': 'a', 'feature2': 'b', 'feature3': ''},
{'action': 2, 'cost': 1, 'probability': 0.3, 'feature1': 'a', 'feature2': 'b', 'feature3': 'c'},
{'action': 3, 'cost': 1, 'probability': 0.7, 'feature1': 'a', 'feature2': 'd', 'feature3': ''}]
train_df = pd.DataFrame(train_data)
train_df['index'] = range(1, len(train_df) + 1)
train_df = train_df.set_index("index")
test_data = [{'feature1': 'b', 'feature2': 'c', 'feature3': ''},
{'feature1': 'a', 'feature2': '', 'feature3': 'b'},
{'feature1': 'b', 'feature2': 'b', 'feature3': ''},
{'feature1': 'a', 'feature2': '', 'feature3': 'b'}]
test_df = pd.DataFrame(test_data)
# Add index to data frame
test_df['index'] = range(1, len(test_df) + 1)
test_df = test_df.set_index("index")
return train_df, test_df
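# Minimal illustrative sketch: the VW contextual-bandit text format assembled inline in
# the tests below is "action:cost:probability | feature1 feature2 feature3". This
# hypothetical helper builds the same string for a single training-set row.
def helper_format_cb_example(row):
    label = "{}:{}:{}".format(row["action"], row["cost"], row["probability"])
    features = " ".join(str(row[f]) for f in ("feature1", "feature2", "feature3"))
    return label + " | " + features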
def test_getting_started_example_cb():
return helper_getting_started_example("--cb")
def test_getting_started_example_legacy_cb():
return helper_getting_started_example("--cb_force_legacy --cb")
def helper_getting_started_example(which_cb):
train_df, test_df = helper_get_data()
vw = pyvw.vw(which_cb + " 4", enable_logging=True)
for i in train_df.index:
action = train_df.loc[i, "action"]
cost = train_df.loc[i, "cost"]
probability = train_df.loc[i, "probability"]
feature1 = train_df.loc[i, "feature1"]
feature2 = train_df.loc[i, "feature2"]
feature3 = train_df.loc[i, "feature3"]
learn_example = str(action) + ":" + str(cost) + ":" + str(probability) + " | " + str(feature1) + " " + str(feature2) + " " + str(feature3)
vw.learn(learn_example)
assert vw.get_prediction_type() == vw.pMULTICLASS, "prediction_type should be multiclass"
for j in test_df.index:
feature1 = test_df.loc[j, "feature1"]
feature2 = test_df.loc[j, "feature2"]
feature3 = test_df.loc[j, "feature3"]
choice = vw.predict("| "+str(feature1)+" "+str(feature2)+" "+str(feature3))
assert isinstance(choice, int), "choice should be int"
assert choice == 3, "predicted action should be 3 instead of " + str(choice)
vw.finish()
output = vw.get_log()
if which_cb.find("legacy") != -1:
test_file = "test-sets/ref/python_test_cb_legacy.stderr"
else:
test_file = "test-sets/ref/python_test_cb.stderr"
with open(path.join(helper_get_test_dir(), test_file), 'r') as file:
actual = file.readlines()
for j, i in zip(actual, output):
assert i == j, "line mismatch should be: " + j + " output: " + i
def test_getting_started_example_with():
train_df, test_df = helper_get_data()
# with syntax calls into vw.finish() automatically.
# you actually want to use 'with pyvw.vw("--cb 4") as vw:'
# but we need to assert on vw.finished for test purposes
vw = pyvw.vw("--cb 4")
with vw as vw:
for i in train_df.index:
action = train_df.loc[i, "action"]
cost = train_df.loc[i, "cost"]
probability = train_df.loc[i, "probability"]
feature1 = train_df.loc[i, "feature1"]
feature2 = train_df.loc[i, "feature2"]
feature3 = train_df.loc[i, "feature3"]
learn_example = str(action) + ":" + str(cost) + ":" + str(probability) + " | " + str(feature1) + " " + str(feature2) + " " + str(feature3)
vw.learn(learn_example)
assert vw.get_prediction_type() == vw.pMULTICLASS, "prediction_type should be multiclass"
for j in test_df.index:
feature1 = test_df.loc[j, "feature1"]
feature2 = test_df.loc[j, "feature2"]
feature3 = test_df.loc[j, "feature3"]
choice = vw.predict("| "+str(feature1)+" "+str(feature2)+" "+str(feature3))
assert isinstance(choice, int), "choice should be int"
assert choice == 3, "predicted action should be 3"
assert vw.finished == True, "with syntax should finish() vw instance"
the-stack_106_15615
# -*- coding: utf-8 -*-
'''
Execute calls on selinux
.. note::
This module requires the ``semanage`` and ``setsebool`` commands to be
available on the minion. On RHEL-based distros, this means that the
``policycoreutils`` and ``policycoreutils-python`` packages must be
installed. If not on a RHEL-based distribution, consult the selinux
documentation for your distro to ensure that the proper packages are
installed.
'''
from __future__ import absolute_import
# Import python libs
import os
# Import salt libs
import salt.utils
import salt.utils.decorators as decorators
from six import string_types
from salt.exceptions import CommandExecutionError
def __virtual__():
'''
Check if the os is Linux, and then if selinux is running in permissive or
enforcing mode.
'''
required_cmds = ('semanage', 'setsebool')
# Iterate over all of the commands this module uses and make sure
# each of them are available in the standard PATH to prevent breakage
for cmd in required_cmds:
if not salt.utils.which(cmd):
return False
# SELinux only makes sense on Linux *obviously*
if __grains__['kernel'] == 'Linux' and selinux_fs_path():
return 'selinux'
return False
# Cache the SELinux directory to not look it up over and over
@decorators.memoize
def selinux_fs_path():
'''
Return the location of the SELinux VFS directory
CLI Example:
.. code-block:: bash
salt '*' selinux.selinux_fs_path
'''
# systems running systemd (e.g. Fedora 15 and newer)
# have the selinux filesystem in a different location
for directory in ('/sys/fs/selinux', '/selinux'):
if os.path.isdir(directory):
if os.path.isfile(os.path.join(directory, 'enforce')):
return directory
return None
def getenforce():
'''
Return the mode selinux is running in
CLI Example:
.. code-block:: bash
salt '*' selinux.getenforce
'''
try:
enforce = os.path.join(selinux_fs_path(), 'enforce')
with salt.utils.fopen(enforce, 'r') as _fp:
if _fp.readline().strip() == '0':
return 'Permissive'
else:
return 'Enforcing'
except (IOError, OSError, AttributeError) as exc:
msg = 'Could not read SELinux enforce file: {0}'
raise CommandExecutionError(msg.format(str(exc)))
def setenforce(mode):
'''
Set the SELinux enforcing mode
CLI Example:
.. code-block:: bash
salt '*' selinux.setenforce enforcing
'''
if isinstance(mode, string_types):
if mode.lower() == 'enforcing':
mode = '1'
elif mode.lower() == 'permissive':
mode = '0'
else:
return 'Invalid mode {0}'.format(mode)
elif isinstance(mode, int):
if mode:
mode = '1'
else:
mode = '0'
else:
return 'Invalid mode {0}'.format(mode)
enforce = os.path.join(selinux_fs_path(), 'enforce')
try:
with salt.utils.fopen(enforce, 'w') as _fp:
_fp.write(mode)
except (IOError, OSError) as exc:
msg = 'Could not write SELinux enforce file: {0}'
raise CommandExecutionError(msg.format(str(exc)))
return getenforce()
def getsebool(boolean):
'''
Return the information on a specific selinux boolean
CLI Example:
.. code-block:: bash
salt '*' selinux.getsebool virt_use_usb
'''
return list_sebool().get(boolean, {})
def setsebool(boolean, value, persist=False):
'''
Set the value for a boolean
CLI Example:
.. code-block:: bash
salt '*' selinux.setsebool virt_use_usb off
'''
if persist:
cmd = 'setsebool -P {0} {1}'.format(boolean, value)
else:
cmd = 'setsebool {0} {1}'.format(boolean, value)
return not __salt__['cmd.retcode'](cmd)
def setsebools(pairs, persist=False):
'''
Set the value of multiple booleans
CLI Example:
.. code-block:: bash
salt '*' selinux.setsebools '{virt_use_usb: on, squid_use_tproxy: off}'
'''
if not isinstance(pairs, dict):
return {}
if persist:
cmd = 'setsebool -P '
else:
cmd = 'setsebool '
for boolean, value in pairs.items():
cmd = '{0} {1}={2}'.format(cmd, boolean, value)
return not __salt__['cmd.retcode'](cmd)
def list_sebool():
'''
Return a structure listing all of the selinux booleans on the system and
what state they are in
CLI Example:
.. code-block:: bash
salt '*' selinux.list_sebool
'''
bdata = __salt__['cmd.run']('semanage boolean -l').splitlines()
ret = {}
for line in bdata[1:]:
if not line.strip():
continue
comps = line.split()
ret[comps[0]] = {'State': comps[1][1:],
'Default': comps[3][:-1],
'Description': ' '.join(comps[4:])}
return ret
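# Minimal illustrative sketch: how a single line of `semanage boolean -l` output
# (format assumed from the parsing in list_sebool above) maps into the returned
# structure. The sample line is hypothetical.
def _demo_parse_sebool_line(line='virt_use_usb  (on   ,   on)  Allow virt to use USB devices'):
    comps = line.split()
    return {comps[0]: {'State': comps[1][1:],
                       'Default': comps[3][:-1],
                       'Description': ' '.join(comps[4:])}}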
the-stack_106_15618
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Early Stopping
^^^^^^^^^^^^^^
Monitor a metric and stop training when it stops improving.
"""
import logging
from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
import torch
import pytorch_lightning as pl
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_deprecation, rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
log = logging.getLogger(__name__)
class EarlyStopping(Callback):
r"""
Monitor a metric and stop training when it stops improving.
Args:
monitor: quantity to be monitored.
min_delta: minimum change in the monitored quantity to qualify as an improvement, i.e. an absolute
change of less than `min_delta`, will count as no improvement.
patience: number of checks with no improvement
after which training will be stopped. Under the default configuration, one check happens after
every training epoch. However, the frequency of validation can be modified by setting various parameters on
the ``Trainer``, for example ``check_val_every_n_epoch`` and ``val_check_interval``.
.. note::
It must be noted that the patience parameter counts the number of validation checks with
no improvement, and not the number of training epochs. Therefore, with parameters
``check_val_every_n_epoch=10`` and ``patience=3``, the trainer will perform at least 40 training
epochs before being stopped.
verbose: verbosity mode.
mode: one of ``'min'``, ``'max'``. In ``'min'`` mode, training will stop when the quantity
monitored has stopped decreasing and in ``'max'`` mode it will stop when the quantity
monitored has stopped increasing.
strict: whether to crash the training if `monitor` is not found in the validation metrics.
check_finite: When set ``True``, stops training when the monitor becomes NaN or infinite.
stopping_threshold: Stop training immediately once the monitored quantity reaches this threshold.
divergence_threshold: Stop training as soon as the monitored quantity becomes worse than this threshold.
check_on_train_epoch_end: whether to run early stopping at the end of the training epoch.
If this is ``False``, then the check runs at the end of the validation.
Raises:
MisconfigurationException:
If ``mode`` is none of ``"min"`` or ``"max"``.
RuntimeError:
If the metric ``monitor`` is not available.
Example::
>>> from pytorch_lightning import Trainer
>>> from pytorch_lightning.callbacks import EarlyStopping
>>> early_stopping = EarlyStopping('val_loss')
>>> trainer = Trainer(callbacks=[early_stopping])
.. tip:: Saving and restoring multiple early stopping callbacks at the same time is supported under variation in the
following arguments:
*monitor, mode*
Read more: :ref:`Persisting Callback State`
"""
mode_dict = {"min": torch.lt, "max": torch.gt}
order_dict = {"min": "<", "max": ">"}
def __init__(
self,
monitor: Optional[str] = None,
min_delta: float = 0.0,
patience: int = 3,
verbose: bool = False,
mode: str = "min",
strict: bool = True,
check_finite: bool = True,
stopping_threshold: Optional[float] = None,
divergence_threshold: Optional[float] = None,
check_on_train_epoch_end: Optional[bool] = None,
):
super().__init__()
self.min_delta = min_delta
self.patience = patience
self.verbose = verbose
self.mode = mode
self.strict = strict
self.check_finite = check_finite
self.stopping_threshold = stopping_threshold
self.divergence_threshold = divergence_threshold
self.wait_count = 0
self.stopped_epoch = 0
self._check_on_train_epoch_end = check_on_train_epoch_end
if self.mode not in self.mode_dict:
raise MisconfigurationException(f"`mode` can be {', '.join(self.mode_dict.keys())}, got {self.mode}")
self.min_delta *= 1 if self.monitor_op == torch.gt else -1
torch_inf = torch.tensor(np.Inf)
self.best_score = torch_inf if self.monitor_op == torch.lt else -torch_inf
if monitor is None:
rank_zero_deprecation(
"The `EarlyStopping(monitor)` argument will be required starting in v1.6."
" For backward compatibility, setting this to `early_stop_on`."
)
self.monitor = monitor or "early_stop_on"
@property
def state_key(self) -> str:
return self._generate_state_key(monitor=self.monitor, mode=self.mode)
def on_init_end(self, trainer: "pl.Trainer") -> None:
if self._check_on_train_epoch_end is None:
# if the user runs validation multiple times per training epoch or multiple training epochs without
# validation, then we run after validation instead of on train epoch end
self._check_on_train_epoch_end = trainer.val_check_interval == 1.0 and trainer.check_val_every_n_epoch == 1
def _validate_condition_metric(self, logs):
monitor_val = logs.get(self.monitor)
error_msg = (
f"Early stopping conditioned on metric `{self.monitor}` which is not available."
" Pass in or modify your `EarlyStopping` callback to use any of the following:"
f' `{"`, `".join(list(logs.keys()))}`'
)
if monitor_val is None:
if self.strict:
raise RuntimeError(error_msg)
if self.verbose > 0:
rank_zero_warn(error_msg, RuntimeWarning)
return False
return True
@property
def monitor_op(self) -> Callable:
return self.mode_dict[self.mode]
def on_save_checkpoint(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: Dict[str, Any]
) -> Dict[str, Any]:
return {
"wait_count": self.wait_count,
"stopped_epoch": self.stopped_epoch,
"best_score": self.best_score,
"patience": self.patience,
}
def on_load_checkpoint(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", callback_state: Dict[str, Any]
) -> None:
self.wait_count = callback_state["wait_count"]
self.stopped_epoch = callback_state["stopped_epoch"]
self.best_score = callback_state["best_score"]
self.patience = callback_state["patience"]
def _should_skip_check(self, trainer) -> bool:
from pytorch_lightning.trainer.states import TrainerFn
return trainer.state.fn != TrainerFn.FITTING or trainer.sanity_checking
def on_train_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if not self._check_on_train_epoch_end or self._should_skip_check(trainer):
return
self._run_early_stopping_check(trainer)
def on_validation_end(self, trainer, pl_module) -> None:
if self._check_on_train_epoch_end or self._should_skip_check(trainer):
return
self._run_early_stopping_check(trainer)
def _run_early_stopping_check(self, trainer: "pl.Trainer") -> None:
"""Checks whether the early stopping condition is met and if so tells the trainer to stop the training."""
logs = trainer.callback_metrics
if trainer.fast_dev_run or not self._validate_condition_metric( # disable early_stopping with fast_dev_run
logs
): # short circuit if metric not present
return
current = logs.get(self.monitor)
should_stop, reason = self._evaluate_stopping_criteria(current)
# stop every ddp process if any world process decides to stop
should_stop = trainer.training_type_plugin.reduce_boolean_decision(should_stop)
trainer.should_stop = trainer.should_stop or should_stop
if should_stop:
self.stopped_epoch = trainer.current_epoch
if reason and self.verbose:
self._log_info(trainer, reason)
def _evaluate_stopping_criteria(self, current: torch.Tensor) -> Tuple[bool, str]:
should_stop = False
reason = None
if self.check_finite and not torch.isfinite(current):
should_stop = True
reason = (
f"Monitored metric {self.monitor} = {current} is not finite."
f" Previous best value was {self.best_score:.3f}. Signaling Trainer to stop."
)
elif self.stopping_threshold is not None and self.monitor_op(current, self.stopping_threshold):
should_stop = True
reason = (
"Stopping threshold reached:"
f" {self.monitor} = {current} {self.order_dict[self.mode]} {self.stopping_threshold}."
" Signaling Trainer to stop."
)
elif self.divergence_threshold is not None and self.monitor_op(-current, -self.divergence_threshold):
should_stop = True
reason = (
"Divergence threshold reached:"
f" {self.monitor} = {current} {self.order_dict[self.mode]} {self.divergence_threshold}."
" Signaling Trainer to stop."
)
elif self.monitor_op(current - self.min_delta, self.best_score.to(current.device)):
should_stop = False
reason = self._improvement_message(current)
self.best_score = current
self.wait_count = 0
else:
self.wait_count += 1
if self.wait_count >= self.patience:
should_stop = True
reason = (
f"Monitored metric {self.monitor} did not improve in the last {self.wait_count} records."
f" Best score: {self.best_score:.3f}. Signaling Trainer to stop."
)
return should_stop, reason
def _improvement_message(self, current: torch.Tensor) -> str:
"""Formats a log message that informs the user about an improvement in the monitored score."""
if torch.isfinite(self.best_score):
msg = (
f"Metric {self.monitor} improved by {abs(self.best_score - current):.3f} >="
f" min_delta = {abs(self.min_delta)}. New best score: {current:.3f}"
)
else:
msg = f"Metric {self.monitor} improved. New best score: {current:.3f}"
return msg
@staticmethod
def _log_info(trainer: Optional["pl.Trainer"], message: str) -> None:
if trainer is not None and trainer.world_size > 1:
log.info(f"[rank: {trainer.global_rank}] {message}")
else:
log.info(message)
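# Minimal illustrative sketch of the improvement test used in _evaluate_stopping_criteria
# above, which reduces to monitor_op(current - min_delta, best_score). With mode="min",
# __init__ flips the sign of min_delta, so an improvement requires
# current < best_score - |min_delta|. The numbers below are hypothetical.
def _demo_improvement_check() -> bool:
    best_score = torch.tensor(0.50)
    current = torch.tensor(0.48)
    min_delta = -0.01  # sign already flipped for mode="min"
    return bool(torch.lt(current - min_delta, best_score))  # True, since 0.49 < 0.50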
the-stack_106_15619
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
"""Module defining a Charm providing database management for FINOS Legend."""
import logging
from charms.finos_legend_db_k8s.v0 import legend_database
from charms.mongodb_k8s.v0.mongodb import MongoConsumer
from ops import charm, framework, main, model
logger = logging.getLogger(__name__)
MONGODB_RELATION_NAME = "db"
LEGEND_DB_RELATION_NAME = "legend-db"
class LegendDatabaseManagerCharm(charm.CharmBase):
"""Charm which shares a MongodDB relation with related Legend Services."""
_stored = framework.StoredState()
def __init__(self, *args):
super().__init__(*args)
self._set_stored_defaults()
# General hooks:
self.framework.observe(self.on.install, self._on_install)
# MongoDB consumer setup:
self._mongodb_consumer = MongoConsumer(self, MONGODB_RELATION_NAME)
# LDB library setup:
self._legend_db_consumer = legend_database.LegendDatabaseConsumer(self)
# Mongo relation lifecycle events:
self.framework.observe(
self.on[MONGODB_RELATION_NAME].relation_joined, self._on_db_relation_joined
)
self.framework.observe(
self.on[MONGODB_RELATION_NAME].relation_changed, self._on_db_relation_changed
)
# Legend component relation events:
self.framework.observe(
self.on[LEGEND_DB_RELATION_NAME].relation_joined, self._on_legend_db_relation_joined
)
self.framework.observe(
self.on[LEGEND_DB_RELATION_NAME].relation_changed, self._on_legend_db_relation_changed
)
        # Set blocked status until MongoDB is related:
if not self.unit.status:
self.unit.status = model.BlockedStatus("requires relating to: mongodb-k8s")
def _set_stored_defaults(self) -> None:
self._stored.set_default(log_level="DEBUG")
def _on_install(self, _: charm.InstallEvent):
self.unit.status = model.BlockedStatus("requires relating to: mongodb-k8s")
def _on_config_changed(self, _) -> None:
# NOTE(aznashwan): this charm does not yet have any config options:
pass
def _on_db_relation_joined(self, event: charm.RelationJoinedEvent):
pass
def _set_legend_db_creds_in_relation(self, legend_database_creds, relation):
"""Attempts to add the given Database creds to the given relation's data.
Returns a `model.BlockedStatus` if it was unable to set the rel data.
"""
if not legend_database.set_legend_database_creds_in_relation_data(
relation.data[self.app], legend_database_creds
):
return model.BlockedStatus(
"failed to set creds in legend db relation: %s" % (relation.id)
)
return None
def _set_legend_db_creds_in_relations(self, legend_database_creds):
"""Shares the MongoDB creds with all related Lenged services.
Returns a `model.BlockedStatus` if it was unable to set the rel data.
"""
for relation in self.model.relations[LEGEND_DB_RELATION_NAME]:
possible_blocked_status = self._set_legend_db_creds_in_relation(
legend_database_creds, relation
)
if possible_blocked_status:
return possible_blocked_status
return None
def _get_mongo_db_credentials(self, rel_id=None):
"""Returns MongoDB creds or a `Waiting/BlockedStatus` otherwise."""
# Check whether credentials for a database are available:
mongo_creds = self._mongodb_consumer.credentials(rel_id)
if not mongo_creds:
return model.WaitingStatus("waiting for mongo database credentials")
# Check whether the databases were created:
databases = self._mongodb_consumer.databases(rel_id)
if not databases:
self._mongodb_consumer.new_database()
return model.WaitingStatus("waiting for mongo database creation")
# Fetch the credentials from the relation data:
get_creds = legend_database.get_database_connection_from_mongo_data
legend_database_creds = get_creds(mongo_creds, databases)
if not legend_database_creds:
return model.BlockedStatus(
"failed to process MongoDB connection data for legend db "
"format, please review the debug-log for full details"
)
logger.debug(
"Current Legend MongoDB creds provided by the relation are: %s", legend_database_creds
)
return legend_database_creds
def _on_db_relation_changed(self, event: charm.RelationChangedEvent) -> None:
rel_id = event.relation.id
legend_database_creds = self._get_mongo_db_credentials(rel_id)
if isinstance(legend_database_creds, (model.WaitingStatus, model.BlockedStatus)):
self.unit.status = legend_database_creds
return
# Propagate the DB creds to all relating Legend services:
possible_blocked_status = self._set_legend_db_creds_in_relations(legend_database_creds)
if possible_blocked_status:
self.unit.status = possible_blocked_status
return
self.unit.status = model.ActiveStatus()
def _on_legend_db_relation_joined(self, event: charm.RelationJoinedEvent):
legend_database_creds = self._get_mongo_db_credentials()
if isinstance(legend_database_creds, (model.WaitingStatus, model.BlockedStatus)):
logger.warning(
"Could not provide Legend MongoDB creds to relation '%s' as none are "
"currently available",
event.relation.id,
)
self.unit.status = legend_database_creds
return
# Add the creds to the relation:
possible_blocked_status = self._set_legend_db_creds_in_relation(
legend_database_creds, event.relation
)
if possible_blocked_status:
self.unit.status = possible_blocked_status
return
def _on_legend_db_relation_changed(self, event: charm.RelationChangedEvent):
pass
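# Minimal illustrative sketch of the dispatch pattern used by the relation handlers
# above: _get_mongo_db_credentials() returns either usable credentials or a
# Waiting/Blocked status, and callers branch on the type. The helper name is hypothetical.
def _creds_or_status(result):
    if isinstance(result, (model.WaitingStatus, model.BlockedStatus)):
        return None, result  # caller sets unit.status to this and returns early
    return result, None  # caller propagates the credentials to the Legend relations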
if __name__ == "__main__":
main.main(LegendDatabaseManagerCharm)
the-stack_106_15621
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for implementing the `datalab delete` command."""
from __future__ import absolute_import
from . import utils
description = ("""`{0} {1}` deletes the given Datalab instance's
Google Compute Engine VM.
By default, the persistent disk's auto-delete configuration determines
whether or not that disk is also deleted.
If you wish to override that setting, you can pass in one of either the
`--delete-disk` flag or the `--keep-disk` flag.
For more information on disk auto-deletion, see
https://cloud.google.com/compute/docs/disks/persistent-disks#updateautodelete
""")
_DELETE_DISK_HELP = ("""Whether or not to delete the instance's persistent disk
regardless of the disks' auto-delete configuration.""")
_KEEP_DISK_HELP = ("""Whether or not to keep the instance's persistent disk
regardless of the disks' auto-delete configuration.""")
_DELETE_BASE_PROMPT = ("""The following instance will be deleted:
- [{}] in [{}]
The corresponding notebooks disk {}.
""")
def flags(parser):
"""Add command line flags for the `delete` subcommand.
Args:
parser: The argparse parser to which to add the flags.
"""
parser.add_argument(
'instance',
metavar='NAME',
help='name of the instance to delete')
auto_delete_override = parser.add_mutually_exclusive_group()
auto_delete_override.add_argument(
'--delete-disk',
dest='delete_disk',
action='store_true',
help=_DELETE_DISK_HELP)
auto_delete_override.add_argument(
'--keep-disk',
dest='keep_disk',
action='store_true',
help=_KEEP_DISK_HELP)
return
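# Minimal illustrative sketch of how the mutually exclusive --delete-disk / --keep-disk
# flags registered above parse; the program name and argument values are hypothetical.
def _demo_parse_flags():
    import argparse
    parser = argparse.ArgumentParser(prog='datalab delete')
    flags(parser)
    args = parser.parse_args(['my-instance', '--keep-disk'])
    return (args.instance, args.delete_disk, args.keep_disk)  # ('my-instance', False, True)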
def run(args, gcloud_compute, gcloud_zone=None, **unused_kwargs):
"""Implementation of the `datalab delete` subcommand.
Args:
args: The Namespace instance returned by argparse
gcloud_compute: Function that can be used to invoke `gcloud compute`
gcloud_zone: The zone that gcloud is configured to use
Raises:
    subprocess.CalledProcessError: If a nested `gcloud` call fails
"""
instance = args.instance
utils.maybe_prompt_for_zone(args, gcloud_compute, instance)
base_cmd = ['instances', 'delete', '--quiet']
if args.zone:
base_cmd.extend(['--zone', args.zone])
instance_zone = args.zone
else:
instance_zone = gcloud_zone
if args.delete_disk:
base_cmd.extend(['--delete-disks', 'data'])
notebooks_disk_message_part = 'will be deleted'
elif args.keep_disk:
base_cmd.extend(['--keep-disks', 'data'])
notebooks_disk_message_part = 'will not be deleted'
else:
disk_cfg = utils.instance_notebook_disk(args, gcloud_compute, instance)
if not disk_cfg:
notebooks_disk_message_part = 'is not attached'
elif disk_cfg['autoDelete']:
notebooks_disk_message_part = 'will be deleted'
else:
notebooks_disk_message_part = 'will not be deleted'
message = _DELETE_BASE_PROMPT.format(
instance, instance_zone, notebooks_disk_message_part)
if not utils.prompt_for_confirmation(
args=args,
message=message,
accept_by_default=True):
print('Deletion aborted by user; Exiting.')
return
print('Deleting {0}'.format(instance))
gcloud_compute(args, base_cmd + [instance])
return
the-stack_106_15622
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
import sympy as sp
import pickle
import sys
from ipydex import IPS, activate_ips_on_exception
from pytrajectory import auxiliary as aux, log, TransitionProblem
# activate verbose debug-messages
log.console_handler.setLevel(10)
activate_ips_on_exception()
# Load the model data
fname = "pickles/model.pcl"
with open(fname, "rb") as pfile:
pdict = pickle.load(pfile)
print(fname, "gelesen")
# Load the variables from the dict into "normal" names
q_symbs = pdict['symbols']
params = pdict['parameters']
params_values = pdict['parameter_values']
qdd_part_lin_num = pdict['qdd_part_lin_num']
Anum = pdict['Anum']
Bnum = pdict['Bnum']
a = pdict['a']
q1, q2, q3, q4, q1d, q2d, q3d, q4d = q_symbs[:-4]
q1dd_expr, q2dd_expr, q3dd_expr, q4dd_expr = qdd_part_lin_num[-4:]
### Convert the sympy expressions into callable Python functions
q1dd_fnc = sp.lambdify([q1, q2, q3, q4, q1d, q2d, q3d, q4d, a], q1dd_expr, 'sympy')
q2dd_fnc = sp.lambdify([q1, q2, q3, q4, q1d, q2d, q3d, q4d, a], q2dd_expr, 'sympy')
q3dd_fnc = sp.lambdify([q1, q2, q3, q4, q1d, q2d, q3d, q4d, a], q3dd_expr, 'sympy')
q4dd_fnc = sp.lambdify([q1, q2, q3, q4, q1d, q2d, q3d, q4d, a], q4dd_expr, 'sympy')
# nonlinear, partially linearized model
def model_rhs(state, u, uref, t, pp,):
# ignored arguments: uref, t, pp,
x1, x2, x3, x4, x5, x6, x7, x8 = state # q1, q2, q3, q4, q1d, q2d, q3d, q4d
stell, = u
x1d = x5
x2d = x6
x3d = x7
x4d = x8
x5d = q1dd_fnc(x1, x2, x3, x4, x5, x6, x7, x8, stell)
x6d = q2dd_fnc(x1, x2, x3, x4, x5, x6, x7, x8, stell)
x7d = q3dd_fnc(x1, x2, x3, x4, x5, x6, x7, x8, stell)
x8d = q4dd_fnc(x1, x2, x3, x4, x5, x6, x7, x8, stell)
return np.array([x1d, x2d, x3d, x4d, x5d, x6d, x7d, x8d])
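# Minimal illustrative sketch: evaluating the partially linearized right-hand side once
# at a hypothetical state/input pair. The lambdified functions above use the 'sympy'
# backend, so the returned entries may be sympy numbers rather than floats.
def _demo_evaluate_rhs():
    state0 = np.zeros(8)
    u0 = [0.0]
    return model_rhs(state0, u0, None, None, None)  # array of the 8 state derivatives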
Ta = 0.0
Tb = 0.9
ua = 0.0
ub = 0.0
xa = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
xb = [0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0]
# S = ControlSystem(model_rhs, Ta, Tb, xa, xb, ua, ub)
# state, u = S.solve()
# constraints for the velocity of the car
# con = {"x7": [-4, 4]}
con = {}
if __name__ == "__main__":
args = aux.Container(
poolsize=2, ff=model_rhs, a=Ta, xa=xa, xb=xb, ua=0, ub=0,
use_chains=False, ierr=None, maxIt=5, eps=4e-1, kx=2, use_std_approach=False,
seed=[1, ], constraints=con, show_ir=True, b=3.7 # + np.r_[0, .1, .2, .3, .4, .5]
)
if "single" in sys.argv:
args.b = 3.7
args.maxIt = 7
args.dict.pop("poolsize")
args.show_ir = False
args.seed = 1
args.maxIt = 4
args.mpc_th = 3
TP1 = TransitionProblem(**args.dict)
results = xx, uu = TP1.solve()
# ensure that the result is compatible with system dynamics
sic = TP1.return_sol_info_container()
# collocation points:
cp1 = TP1.eqs.cpts[1]
# right hand side
        ffres = np.array(model_rhs(xx(cp1), uu(cp1), None, None, None), dtype=float)
# derivative of the state trajectory (left hand side)
dxres = TP1.eqs.trajectories.dx(cp1)
err = dxres - ffres
# the entries of `err` form part of the entries of the following
Fx = TP1.eqs.opt_problem_F(sic.opt_sol)
# this is (almost) the same as c.solver_res
normFx = np.linalg.norm(Fx)
else:
results = aux.parallelizedTP(debug=False, **args.dict)
IPS()
the-stack_106_15624
# MIT License
#
# Copyright (c) 2018 Stichting SingularityNET
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
### Import packages
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from logging import debug
from math import ceil, floor
### Get strings between two strings; will be useful when extracting the date.
def find_between( s, first, last ):
try:
start = s.index( first ) + len( first )
end = s.index( last, start )
return s[start:end]
except ValueError:
return ""
### Get strings between two strings; will be useful when extracting the date. Different way of implementing the
### same thing as above.
def find_between_r( s, first, last ):
try:
start = s.rindex( first ) + len( first )
end = s.rindex( last, start )
return s[start:end]
except ValueError:
return ""
### The two functions below make sure we can compute the difference between times.
def parse_prefix(line, fmt):
cover = len(datetime.now().strftime(fmt))
return datetime.strptime(line[:cover], fmt)
### Calculate days between d2 and d1.
def days_between(d1, d2):
return abs((d2 - d1).days)
def downratings(condition,ratings):
if condition:
        ### current_max is expected to be 1.
        current_max = 1  ### Note that the maximum is set to 1 as per the documentation.
        ### We do not allow values higher than 1.
i = 0
while i<len(ratings):
if ratings[i]['value']>current_max:
### as soon as we find 1 value above 1, we raise an error.
raise ValueError("Downratings are not on the scale of 0 to 1, as required.")
i+=1
i=0
while i<len(ratings):
            ### ratings are expected to be converted to the range -100 to +100 (or -1.0 to +1.0 on a -100% to +100% scale)
ratings[i]['value'] = ratings[i]['value']/current_max
### current_max is set to 1, so we are essentially dividing by 1. Now, below, we are converting everything
### to the range of -1 to 1.
if ratings[i]['value']<0.25:
ratings[i]['value'] = ratings[i]['value']/0.25-1
else:
ratings[i]['value'] = (ratings[i]['value']-0.25)/0.75
### Then we multiply by 100, so we get it btw -100 and 100.
ratings[i]['value'] = ratings[i]['value'] * 100
i+=1
return(ratings)
else:
### If downratings=false, then we do nothing.
return(ratings)
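### A small illustrative sketch (not part of the original code): with downrating
### enabled, values on the 0..1 scale are remapped so that 0.25 becomes the neutral
### point, 0 maps to -100 and 1 maps to +100.
def _downratings_example():
    sample = [{'value': 0.0}, {'value': 0.25}, {'value': 1.0}]
    remapped = downratings(True, sample)
    assert [r['value'] for r in remapped] == [-100.0, 0.0, 100.0]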
### Rounding is fixed here. Normal rounding in Python rounds 0.5 to 0, this little function prevents that.
def my_round(n, ndigits):
part = n * 10 ** ndigits
delta = part - int(part)
# always round "away from 0"
if delta >= 0.5 or -0.5 < delta <= 0:
part = ceil(part)
else:
part = floor(part)
return part / (10 ** ndigits)
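### Illustrative sanity checks (not part of the original code): unlike Python's
### built-in banker's rounding, my_round always rounds a trailing .5 away from zero.
def _my_round_examples():
    assert round(0.5) == 0           # built-in rounds half to even
    assert my_round(0.5, 0) == 1.0   # away from zero instead
    assert my_round(-0.5, 0) == -1.0
    assert my_round(1.25, 1) == 1.3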
### Transforming ratings to logarithm, if needed. logratings might or might not be set to true.
def transform_ratings(ratings, logratings):
if logratings:
i=0
while i<len(ratings):
### Transformations of weight depending on the value. If smaller than 0, then we need to adjust a bit.
if ratings[i]['weight']!=None:
if ratings[i]['weight']<0:
ratings[i]['weight'] = -np.log10(1-ratings[i]['weight'])
else:
ratings[i]['weight'] = np.log10(1+ratings[i]['weight'])
else:
### We do the same with value.
if ratings[i]['value']<0:
ratings[i]['value'] = -np.log10(1-ratings[i]['value'])
else:
ratings[i]['value'] = np.log10(1+ratings[i]['value'])#np.log10(1+ratings[i]['value'])
i+=1
return(ratings)
### Weight calculation. Only problem is if we have no value number. If we do, we just call logratings_precision.
def weight_calc(value,lograting,precision,weighting):
if value != None:
return(logratings_precision(value,lograting,precision,weighting))
else:
return(1,None)
### Get starting dates and first occurrences of each address. Also, preparation of arrays and other data
### to be used in the future.
### Note: given that we take an approach where we don't need first_occurance, we set the default option
### need_occurance=False.
def reputation_calc_p1(new_subset,conservatism,precision,temporal_aggregation=False,need_occurance=False,
logratings=False,downrating=False,weighting=True,rater_bias = None,averages = None):
### need_occurance is set to false by default and might even be removed for good. It was made in order to
### facilitate some other approaches towards updating rankings, which we decided not to use in the end.
#### We will need from, to, amount, the rest is not necessary to have - let's save memory.
### Now we will just store the first occurance of each account in a dictionary.
## Inputs are dictionaries, arrays and True/False statements.
    ### We transform the incoming subset in order to apply the downratings transformation.
    new_subset = downratings(downrating,new_subset)
    # (by mark) new_subset holds the current ratings; if downrating is true, they are downrating-adjusted
if rater_bias != None:
rater_bias,average_rating = update_biases(rater_bias,new_subset,conservatism)
our_averages = dict()
for k in averages:
for j in averages[k].keys():
if j in our_averages.keys():
our_averages[j].append(averages[k][j])
else:
our_averages[j] = [averages[k][j]]
our_average = dict()
for k in our_averages.keys():
our_average[k] = np.mean(our_averages[k])
new_subset = fix_rater_bias(new_subset,rater_bias,our_average)
i=0
new_array = []
israting = True
while i<len(new_subset):
if 'value' in list(new_subset[i].keys()):
            ### put ratings in the array. Note that we don't always have rating information, that is,
            ### what ratings were given by specific customers.
### This array is standardized.
if 'weight' in new_subset[i].keys():
new_array.append([new_subset[i]['from'],new_subset[i]['to'],new_subset[i]['weight'],new_subset[i]['value']])
else:
new_array.append([new_subset[i]['from'],new_subset[i]['to'],None,new_subset[i]['value']])
else:
israting = False
if 'weight' in new_subset[i].keys():
new_array.append([new_subset[i]['from'],new_subset[i]['to'],new_subset[i]['weight']])
else:
new_array.append([new_subset[i]['from'],new_subset[i]['to'],None])
i+=1
### We make array of dates and transactions to specific agents.
dates_array = []
to_array = []
i = 0
### we have array of only dates and array of ids which are getting transactions.
while i<len(new_subset):
dates_array.append(new_subset[i]['time'])
to_array.append(new_subset[i]['to'])
i+=1
### In case we have temporal aggregation
if temporal_aggregation:
from_data = []
to_data = to_array
i = 0
while i<len(new_array):
### we merge all the 'from' data.
from_data.append(int(new_array[i][0]))
i+=1
### Temporal aggregation=True;
### First let's just find all the duplicates;
### We merge from and to arrays and look for unique ones...
### The idea of this code is that if there were multiple transactions in a day, we merge them and look at
### the averages.
merged = []
i=0
while i<len(from_data):
### We get all the from-to id combinations.
newnr = str(from_data[i])+"_"+str(to_data[i])
merged.append(newnr)
i+=1
### Here we just count how many times it appears
already_used = {}
### We count how many times each combination appeared.
for i in merged:
if i in already_used.keys():
already_used[i] = already_used[i] + 1
else:
already_used[i] = 1
### Good, now we know exact nr of transactions for each pair...
#### merged data has the same indexing as new_array.
i = 0
### If exists, pass, otherwise this:
### We sum up each feature.
already_used2 = {}
new_array2 = []
to_array2 = []
amounts = {}
ratings = {}
while i<len(merged):
if merged[i] in already_used2.keys():
new_rating, new_weight = weight_calc(new_array[i],logratings,precision,weighting)
amounts[merged[i]] = amounts[merged[i]] + new_rating
if israting:
### then we sum up ratings.
ratings[merged[i]] = ratings[merged[i]] + new_array[i][3]
else:
already_used2[merged[i]]=1
new_rating, new_weight = weight_calc(new_array[i],logratings,precision,weighting)
amounts[merged[i]] = new_rating
if israting:
ratings[merged[i]] = new_array[i][3]
i+=1
i=0
### And divide it by the number of times it appears - getting average.
already_used2 = {}
while i<len(merged):
if merged[i] in already_used2.keys():
pass
else:
already_used2[merged[i]]=1
### Just set some value.
new_array2.append(new_array[i])
new_array2[len(new_array2)-1][2] = amounts[merged[i]]/already_used[merged[i]]
if israting:
new_array2[len(new_array2)-1][3] = ratings[merged[i]]/already_used[merged[i]]
to_array2.append(to_array[i])
i+=1
new_array = new_array2
to_array = to_array2
if rater_bias != None:
return(new_array,dates_array,to_array,rater_bias,average_rating)
else:
return(new_array,dates_array,to_array,rater_bias)
### Get new reputations in case we do not yet have the old ones.
def update_reputation(reputation,new_array,default_reputation,spendings):
i = 0
new_ids = []
while i<len(new_array):
### If we already have it, we do nothing in this function...
### The rest is also checking for "to" transactions and doing the same thing there..
if new_array[i][1] in reputation: #(by mark) if the supplier is in reputation
### If reputation already has an id, we go on, otherwise we add default reputation.
pass
else:
new_ids.append(new_array[i][1])
reputation[new_array[i][1]] = default_reputation
### If we have spendings, we need reputation also for buyers. We make it default if it does not exist yet.
if spendings>0:
if new_array[i][0] in reputation: # (by mark) if the buyer is in reputation
pass
else: #(by mark) if the buyer doesn't have reputation,
new_ids.append(new_array[i][0])
reputation[new_array[i][0]] = default_reputation
i+=1
return(reputation)
### Logratings calculation, calculating weights and fixing for precision.
def logratings_precision(rating,lograting,precision,weighting):
new_weight = None # assume no weight computed by default
### if weighting = False, then we return values only.
if not weighting:
return(rating[3],None)
if lograting:
        ### We go through a few possibilities of what can happen.
### If there are no weights then:
if rating[2] == None:
### depending on precision, we make a log transformation with or without it.
if precision==None:
new_rating = np.log10(1+ rating[3])
else:
new_rating = np.log10(1+ int(rating[3]/precision))
else:
### if we have no values;
if rating[3] == None:
### Then we work with weights only.
if precision==None:
new_rating = np.log10(1+ rating[2])
else:
new_rating = np.log10(1+ int(rating[2]/precision))
else:
### if we have values and weights, we multiply them together.
if precision==None:
new_weight = np.log10(1+ rating[2])
else:
new_weight = np.log10(1+ rating[2]/precision)
new_rating = my_round(new_weight * rating[3],0)
else:
### If not lograting, we do not do log transformation.
if precision==None:
precision=1
if rating[2] == None:#(by mark) financial value or weight
new_rating = rating[3]/precision
else:
if rating[3] == None: #(by mark) rating value
new_rating = rating[2]/precision
else:
new_weight = rating[2]/precision
new_rating = rating[3] * new_weight
new_rating = my_round(new_rating,0)
return(new_rating,new_weight) #return weighted value Fij*Qij to sum and weight Qij to denominate later in dRit = Σj (Fij * Qij * Rjt-1 ) / Σj (Qij)
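### Illustrative sketch (not part of the original code; it assumes the
### [from, to, weight, value] rating layout used elsewhere in this module):
### with weighting on and no log transform, the weighted value F*Q and the
### weight Q are returned separately.
def _logratings_precision_example():
    rating = ['buyer1', 'seller1', 2.0, 5.0]   # weight Q = 2, value F = 5
    weighted_value, weight = logratings_precision(rating, lograting=False,
                                                  precision=None, weighting=True)
    assert weighted_value == 10.0   # my_round(5 * 2, 0)
    assert weight == 2.0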
def update_biases(previous_bias,new_arrays, conservatism):
'''
(by mark) updates the optimism and pessimism bias'''
# all rating is the ratings by a rater
all_rating = dict()
i = 0
while i<len(new_arrays):
if new_arrays[i]['from'] in all_rating.keys():
all_rating[new_arrays[i]['from']].append(new_arrays[i]['value'])
else:
all_rating[new_arrays[i]['from']] = [new_arrays[i]['value']] # (by mark) append all the rating values in a list for each rater
i+=1
averages = dict()
for k in all_rating.keys(): # (by mark) for each rater
averages[k] = np.mean(all_rating[k]) # (by mark) take the average of the ratings for each user (this will be kind of the bias of this period bias)
unique_ids = []
for k in averages.keys(): # (by mark) for each user
if k in unique_ids:
pass
else:
unique_ids.append(k)
for k in previous_bias.keys():
if k in unique_ids:
pass
else:
unique_ids.append(k)
for k in averages.keys():
if k in unique_ids:
pass
else:
unique_ids.append(k)
new_bias = dict()
for k in unique_ids:
if k in averages.keys() and k in previous_bias.keys():
new_bias[k] = averages[k] * (1-conservatism) + conservatism * previous_bias[k]
else:
if k in averages.keys():
new_bias[k] = averages[k] * (1-conservatism) + conservatism ### This is how we are supposed to
### treat first customer based on the https://docs.google.com/document/d/1-O7avb_zJKvCXRvD0FmvyoVZdSmkDMlXRB5wsavpSAM/edit#
if k in previous_bias.keys():
new_bias[k] = previous_bias[k]
return(new_bias,averages) # the new updated bias
def fix_rater_bias(new_array,biases,average):
i = 0
while i<len(new_array):
if new_array[i]['from'] in average.keys():
new_array[i]['value'] = new_array[i]['value'] * (1-my_round(average[new_array[i]['from']],0))
else:
new_array[i]['value'] = new_array[i]['value']
i+=1
return (new_array)
### Get updated reputations, new calculations of them...
### We calculate differential here.
def calculate_new_reputation(logging,new_array,to_array,reputation,rating,precision,previous_rep,default,unrated,normalizedRanks=True,weighting=True,denomination=True,liquid = False,logratings=False,logranks=True,predictiveness = 0,predictive_data = dict()):
### The output will be mys; this is the rating for that specific day (or time period).
### This is needed; first create records for each id.
# (by mark) mys is a differential.
mys = {}
myd = {} # denominators
i = 0
while i<len(new_array): #(by mark) i think no. of interactions
if new_array[i][1] in mys:
pass
else:
### We first set all differential ids to 0.
mys[new_array[i][1]] = 0
i+=1
## getting the formula for mys.
unique_ids = np.unique(to_array) #(by mark) unique ids are the unique ids of suppliers
k=0
i = 0
to_array = np.array(to_array)
### Formula differs based on conditions. If ratings are included, formula includes ratings, then there are weights, etc.
prev_rep1 = dict()
prev_rep1a = dict()
while i<len(unique_ids): #(by mark) for each supplier, calculate its reputation
# note that reputation is sum(rating*weight*rater reputaion prev)
amounts = []
denominators = []
### Here we get log transformation of each amount value.
# (by mark) get_subset is the consumers who have rated supplier unique_ids[i]
get_subset = np.where(to_array==unique_ids[i])[0]
###############################################very important piece
for k in get_subset: #(by mark) for each rater for supplier i
if weighting:
### Calculate ratings and weights.
new_rating, new_weight = weight_calc(new_array[k],logratings,precision,weighting)
### Then we multiply this with rater's current reputation. Few options are taken into account, such as
### if it is liquid reputation, then we set it to 1...
my_rater_rep, prev_rep1 = rater_reputation(reputation,new_array[k][0],default,previous_rep,liquid,new_array[k][1],predictiveness,predictive_data)
for k in prev_rep1.keys():
prev_rep1a[k] = prev_rep1[k]
amounts.append(new_rating * my_rater_rep)
text = "from: " + str(new_array[i][0]) + ", to: " + str(new_array[i][1]) + ", value: " + str(new_array[i][2]) + ", weight: " + str(new_array[i][3])," calculated rating: ",new_rating
logging.debug(text)
### if we have weights and denomination, then we append some denominators.
if denomination and new_weight is not None:
denominators.append(new_weight) # denomination by sum of weights in such case
else:
new_rating, new_weight = weight_calc(new_array[k],logratings,precision,weighting)
new_rating = my_round(new_rating,0)
my_rater_rep, prev_rep1 = rater_reputation(reputation,new_array[k][0],default,previous_rep,liquid,new_array[k][1],predictiveness,predictive_data)
for k in prev_rep1.keys():
prev_rep1a[k] = prev_rep1[k]
amounts.append(new_rating * my_rater_rep)
text = "from: " + new_array[i][0] + ", to: " + str(new_array[i][1]) + ", value: " + str(new_array[i][2]) + ", weight: " + str(new_array[i][3])," calculated rating: ",str(new_rating)
logging.debug(text)
#no need for denomination by sum of weights in such case
### After we are done collecting sums for certain ids, we sum up everything we have.
mys[unique_ids[i]] = sum(amounts) # (by mark) sum up the reputations for all raters (buyers)
### If we have denominators, we also sum them up.
if weighting:
if len(denominators) > 0:
myd[unique_ids[i]] = sum(denominators)
#
i+=1 #(by mark) go to the next supplier and calculate his reputation
### Let's update the records from previous reputations and how we update them (for raters)
for k in prev_rep1a.keys(): # (by mark) can we just assign instead of for loop? previous_rep=prev_rep1a
previous_rep[k] = prev_rep1a[k]
### If we have weighting and denomination, then we
if weighting:
if denomination and len(mys) == len(myd):
for k, v in mys.items():
### divide mys values with denomination values.
mys[k] = v / myd[k]
### nr 5.
    ### Here we make the transformation in the same way as described in point 5 of the documentation.
text = "Differential before log transformation: " + str(mys)
logging.debug(text)
if logranks:
for k in mys.keys():
if mys[k]<0:
mys[k] = -np.log10(1 - mys[k])
else:
mys[k] = np.log10(1 + mys[k])
logging.debug(text)
return(mys,previous_rep)
### normalizing differential.
def normalized_differential(mys,normalizedRanks,our_default,spendings,log=True):
### Nr 6;
### We divide it by max value, as specified. There are different normalizations possible...
### lograting transformation as made by Anton. Since this was done few lines above, I believe this is redundant coding.
if log:
for k in mys.keys():
mys[k] = -np.log10(1 - mys[k]) if mys[k] < 0 else np.log10(1 + mys[k])
    ### It could as well be deleted; in this case test_spendings_normalization will have a different result.
### Then we use maximums, either from what we have or set it to one by default.
max_value = max(mys.values(), default=1)
min_value = min(mys.values(), default=0)
if max_value==0: #normalized zeroes are just zeroes
return(mys)
### Now some rare cases, such as we have only one value of differential and what to do then.
if max_value==min_value:
min_value = max_value - our_default ### as the solution to issue #157
if min_value==max_value and spendings>0:
min_value = max_value - 1
### Now, way of solving this problem in a bit more common way:
for k in mys.keys():
if max_value==min_value:
### Still looking at a special case when max_value==min_value.
mys[k] = (mys[k]-min_value)
else:
if normalizedRanks: ### normalizedRanks is equal to fullnorm.
### Then we normalized based on whether we have normalizedRanks or not.
mys[k] = (mys[k]-min_value) /(max_value-min_value)
else:
mys[k] = mys[k] /max_value
return(mys)
### Get updated reputations, new calculations of them...
### This one is with log...
def rater_reputation(previous_reputations,rater_id,default,prev_reputation,liquid=False,to_id = [],predictiveness = 0,predictive_data = dict()):
### Assigning rater reputation. It is not trivial; if liquid=True, then we can expect that
if rater_id in previous_reputations.keys():
        ### Checks whether the rank is liquid or not. If not liquid, every rater has weight 1; otherwise we use the rater's previous reputation.
if (not liquid):
rater_rep = 1
else:
if rater_id in prev_reputation:
rater_rep = previous_reputations[rater_id] * 100
else:
rater_rep = previous_reputations[rater_id] * 100
else:
if (not liquid):
rater_rep = 1
else:
rater_rep = default * 100
### If it is not in reputations up to the current one, we set a default value.
if predictiveness>0:
if rater_id not in predictive_data.keys():
pass # Do nothing
#raise ValueError('Calling for predictiveness without previous predictive data.')
else:
rater_rep = rater_rep * (1-predictiveness) + predictiveness * predictive_data[rater_id] * rater_rep
previous_rep1 = dict()
for k in prev_reputation.keys():
previous_rep1[k] = prev_reputation[k]
previous_rep1[rater_id] = rater_rep
return(rater_rep,previous_rep1)
### Another normalization. This one is intended for reputation normalization.
def normalize_reputation(reputation,new_array,unrated,default1,decay,conservatism,normalizedRanks=False):
max_value = max(reputation.values(), default=1)
min_value = min(reputation.values(), default=0)
### First we make the same max/min values.
for k in reputation.keys():
if normalizedRanks: ### normalizedRanks is equal to fullnorm.
if max_value!=min_value:
reputation[k] = (reputation[k]-min_value) /(max_value-min_value) #(by mark) if normalizedRanks or fullnorm
else:
pass
else:
### Now, way of solving this problem in a bit more common way:
if max_value!= 0:
reputation[k] = reputation[k] /max_value # (by mark) this is the algorithm here
else:
pass
i = 0
### if unrated=False, we discount new agents for conservativity and decay.
while i<len(new_array):
if unrated:
if new_array[i][0] in reputation.keys():
pass
else:
reputation[new_array[i][0]] = conservatism * default1 + (1-conservatism) * decay
i+=1
return(reputation)
### Initialize dictionary with all keys from our dataset and 0 values;
def initialize_dict(from_array,to_array):
mydict = {}
for k in np.unique(from_array):
if k in mydict.keys():
pass
## If we do not have this record, we set it default reputation.
else:
mydict[str(k)] = 0
for k in np.unique(to_array):
if k in mydict.keys():
pass
## If we do not have this record, we set it default reputation.
else:
mydict[str(k)] = 0
return(mydict)
### Updating reputation - blending differential and reputation.
### In original paper, there were a few proposed ways of updating and approach d has been found to be the most
### useful and the only one we are using at the moment.
def update_reputation_approach_d(first_occurance,reputation,mys,since,our_date,default_rep,conservativity):
### Our current approach of updating reputation each time period.
j = 0
all_keys = set(mys.keys())
for k in reputation.keys():
if k in all_keys:
            ### for everything inside both reputation and differential, this is the equation we are using:
reputation[k] = (1-conservativity) * mys[k] + conservativity * reputation[k]
else:
### when in reputation, but not in differential, this is what we do:
reputation[k] = (1-conservativity) * default_rep + conservativity * reputation[k]
j+=1
return(reputation)
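### Minimal blending sketch (not part of the original code): approach d blends the
### period differential into the running reputation with weight (1-conservativity),
### and pulls ids that are missing from the differential towards the default value.
def _update_reputation_approach_d_example():
    reputation = {'a': 0.8, 'b': 0.4}
    differential = {'a': 0.2}   # 'b' received no ratings this period
    updated = update_reputation_approach_d(None, reputation, differential,
                                           None, None, 0.5, 0.9)
    assert abs(updated['a'] - (0.1 * 0.2 + 0.9 * 0.8)) < 1e-9
    assert abs(updated['b'] - (0.1 * 0.5 + 0.9 * 0.4)) < 1e-9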
### spendings_based function. So, we look at 'from' transactions and use the same calculation as
### for normal differential, except that we use it for 'from' ids.
def spending_based(transactions,som_dict,logratings,precision,weighting):
i=0
while i<len(transactions):
if transactions[i][0] in som_dict.keys():
if not weight_calc(transactions[i],logratings,precision,weighting)[1]==None:
som_dict[transactions[i][0]] += weight_calc(transactions[i],logratings,precision,weighting)[1]
else:
som_dict[transactions[i][0]] += weight_calc(transactions[i],logratings,precision,weighting)[0]
### Not sure about above fix, but sometimes we have none value if weighting=False. This should fix it...
else:
if not weight_calc(transactions[i],logratings,precision,weighting)[1]==None:
som_dict[transactions[i][0]] = weight_calc(transactions[i],logratings,precision,weighting)[1]### changed from
#### new_rating instead of new_weight.
else:
som_dict[transactions[i][0]] = weight_calc(transactions[i],logratings,precision,weighting)[0]
i+=1
return(som_dict)
### An alternative to np.where - created because sometimes there could be problems with former.
def where(to_array,the_id):
our_ids = []
i=0
while i<len(to_array):
if to_array[i]==the_id:
our_ids.append(i)
i+=1
return(our_ids)
### average_individual_rating_by_period
def calculate_average_individual_rating_by_period(transactions,weighted):
count_trans = dict()
#if weighted:
ratings_avg = dict()
i = 0
while i<len(transactions):
if transactions[i][0] in ratings_avg.keys():
if transactions[i][1] in ratings_avg[transactions[i][0]].keys():
ratings_avg[transactions[i][0]][transactions[i][1]].append(transactions[i][3]) ### should be value append.
count_trans[transactions[i][0]][transactions[i][1]] += 1
else:
ratings_avg[transactions[i][0]][transactions[i][1]] = [transactions[i][3]]
count_trans[transactions[i][0]][transactions[i][1]] = 1
else:
ratings_avg[transactions[i][0]] = dict()
count_trans[transactions[i][0]] = dict()
ratings_avg[transactions[i][0]][transactions[i][1]] = [transactions[i][3]]
count_trans[transactions[i][0]][transactions[i][1]] = 1
i+=1
### Now we make averages over everything.
for k in ratings_avg.keys():
for j in ratings_avg[k].keys():
ratings_avg[k][j] = np.mean(ratings_avg[k][j])
return(ratings_avg,count_trans)
def max_date(mydict):
### Get dictionary where keys are dates and we get the value of last date;
sorted_days = sorted(mydict.keys())
last_date = sorted_days[-1]
i = 0
return(mydict[last_date])
### function of predictiveness
def update_predictiveness_data(predictive_data,mydate,reputations,transactions,conservatism):
ids_used = []
for k in transactions:
from_id = k
ids_used.append(k)
if from_id not in predictive_data.keys():
predictive_data[from_id] = dict()
for to_id in transactions[from_id]:
if to_id in predictive_data[from_id].keys():
predictive_data[from_id][to_id] = transactions[from_id][to_id] * (1-conservatism) + conservatism * predictive_data[from_id][to_id] ### mydate should not exist yet in our run.
else:
predictive_data[from_id][to_id] = dict()
predictive_data[from_id][to_id] = transactions[from_id][to_id] #
return(predictive_data,ids_used)
def normalize_individual_data(mydate,new_ids):
    all_from = new_ids
    for k in all_from.keys():
        ### Collect the values recorded for this rater on the given date.
        max_values = []
        for j in all_from[k].keys():
            if mydate in all_from[k][j].keys():
                max_values.append(all_from[k][j][mydate])
        ### ok, now we've added values to max_values. We continue with another, similar loop;
        if len(max_values) == 0:
            continue
        max_value = max(max_values)
        for j in all_from[k].keys():
            if mydate in all_from[k][j].keys():
                all_from[k][j][mydate] = all_from[k][j][mydate]/max_value
    return(all_from)
def calculate_distance(previous_individual_rating,curret_reputation_rank):
distance = 0
j = 0
while j<len(previous_individual_rating):
distance += (curret_reputation_rank[j]/1 - previous_individual_rating[j]/1)**2
j+=1
distance = distance/len(previous_individual_rating)
return(np.sqrt(distance))
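### Quick illustrative check (not part of the original code): the distance above is
### the root-mean-square difference between two equally long rating vectors.
def _calculate_distance_example():
    assert np.isclose(calculate_distance([0, 0, 0], [3, 4, 5]),
                      np.sqrt((9 + 16 + 25) / 3))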
def normalize_correlations(mydict):
mymax = max(mydict.values())
for k in mydict.keys():
mydict[k] = mydict[k]/mymax
return(mydict)
|
the-stack_106_15625
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import shutil
from pathlib import Path
from typing import Generator
import matplotlib.pyplot as plt
import pytest
import torch
import torch.nn as nn
from _pytest.fixtures import SubRequest
from _pytest.monkeypatch import MonkeyPatch
from rasterio.crs import CRS
import torchgeo.datasets.utils
from torchgeo.datasets import (
BoundingBox,
Chesapeake13,
ChesapeakeCVPR,
IntersectionDataset,
UnionDataset,
)
def download_url(url: str, root: str, *args: str, **kwargs: str) -> None:
shutil.copy(url, root)
class TestChesapeake13:
@pytest.fixture
def dataset(
self, monkeypatch: Generator[MonkeyPatch, None, None], tmp_path: Path
) -> Chesapeake13:
pytest.importorskip("zipfile_deflate64")
monkeypatch.setattr( # type: ignore[attr-defined]
torchgeo.datasets.chesapeake, "download_url", download_url
)
md5 = "fe35a615b8e749b21270472aa98bb42c"
monkeypatch.setattr(Chesapeake13, "md5", md5) # type: ignore[attr-defined]
url = os.path.join(
"tests", "data", "chesapeake", "BAYWIDE", "Baywide_13Class_20132014.zip"
)
monkeypatch.setattr(Chesapeake13, "url", url) # type: ignore[attr-defined]
monkeypatch.setattr( # type: ignore[attr-defined]
plt, "show", lambda *args: None
)
root = str(tmp_path)
transforms = nn.Identity() # type: ignore[attr-defined]
return Chesapeake13(root, transforms=transforms, download=True, checksum=True)
def test_getitem(self, dataset: Chesapeake13) -> None:
x = dataset[dataset.bounds]
assert isinstance(x, dict)
assert isinstance(x["crs"], CRS)
assert isinstance(x["mask"], torch.Tensor)
def test_and(self, dataset: Chesapeake13) -> None:
ds = dataset & dataset
assert isinstance(ds, IntersectionDataset)
def test_or(self, dataset: Chesapeake13) -> None:
ds = dataset | dataset
assert isinstance(ds, UnionDataset)
def test_already_extracted(self, dataset: Chesapeake13) -> None:
Chesapeake13(root=dataset.root, download=True)
def test_already_downloaded(self, tmp_path: Path) -> None:
url = os.path.join(
"tests", "data", "chesapeake", "BAYWIDE", "Baywide_13Class_20132014.zip"
)
root = str(tmp_path)
shutil.copy(url, root)
Chesapeake13(root)
def test_not_downloaded(self, tmp_path: Path) -> None:
with pytest.raises(RuntimeError, match="Dataset not found"):
Chesapeake13(str(tmp_path), checksum=True)
def test_plot(self, dataset: Chesapeake13) -> None:
query = dataset.bounds
x = dataset[query]
dataset.plot(x, suptitle="Test")
def test_plot_prediction(self, dataset: Chesapeake13) -> None:
query = dataset.bounds
x = dataset[query]
x["prediction"] = x["mask"].clone()
dataset.plot(x, suptitle="Prediction")
def test_url(self) -> None:
ds = Chesapeake13(os.path.join("tests", "data", "chesapeake", "BAYWIDE"))
assert "cicwebresources.blob.core.windows.net" in ds.url
def test_invalid_query(self, dataset: Chesapeake13) -> None:
query = BoundingBox(0, 0, 0, 0, 0, 0)
with pytest.raises(
IndexError, match="query: .* not found in index with bounds:"
):
dataset[query]
class TestChesapeakeCVPR:
@pytest.fixture(
params=[
("naip-new", "naip-old", "nlcd"),
("landsat-leaf-on", "landsat-leaf-off", "lc"),
("naip-new", "landsat-leaf-on", "lc", "nlcd", "buildings"),
("naip-new", "prior_from_cooccurrences_101_31_no_osm_no_buildings"),
]
)
def dataset(
self,
request: SubRequest,
monkeypatch: Generator[MonkeyPatch, None, None],
tmp_path: Path,
) -> ChesapeakeCVPR:
monkeypatch.setattr( # type: ignore[attr-defined]
torchgeo.datasets.chesapeake, "download_url", download_url
)
monkeypatch.setattr( # type: ignore[attr-defined]
ChesapeakeCVPR,
"md5s",
{
"base": "882d18b1f15ea4498bf54e674aecd5d4",
"prior_extension": "677446c486f3145787938b14ee3da13f",
},
)
monkeypatch.setattr( # type: ignore[attr-defined]
ChesapeakeCVPR,
"urls",
{
"base": os.path.join(
"tests",
"data",
"chesapeake",
"cvpr",
"cvpr_chesapeake_landcover.zip",
),
"prior_extension": os.path.join(
"tests",
"data",
"chesapeake",
"cvpr",
"cvpr_chesapeake_landcover_prior_extension.zip",
),
},
)
monkeypatch.setattr( # type: ignore[attr-defined]
ChesapeakeCVPR,
"files",
["de_1m_2013_extended-debuffered-test_tiles", "spatial_index.geojson"],
)
root = str(tmp_path)
transforms = nn.Identity() # type: ignore[attr-defined]
return ChesapeakeCVPR(
root,
splits=["de-test"],
layers=request.param,
transforms=transforms,
download=True,
checksum=True,
)
def test_getitem(self, dataset: ChesapeakeCVPR) -> None:
x = dataset[dataset.bounds]
assert isinstance(x, dict)
assert isinstance(x["crs"], CRS)
assert isinstance(x["mask"], torch.Tensor)
def test_and(self, dataset: ChesapeakeCVPR) -> None:
ds = dataset & dataset
assert isinstance(ds, IntersectionDataset)
def test_or(self, dataset: ChesapeakeCVPR) -> None:
ds = dataset | dataset
assert isinstance(ds, UnionDataset)
def test_already_extracted(self, dataset: ChesapeakeCVPR) -> None:
ChesapeakeCVPR(root=dataset.root, download=True)
def test_already_downloaded(self, tmp_path: Path) -> None:
root = str(tmp_path)
shutil.copy(
os.path.join(
"tests", "data", "chesapeake", "cvpr", "cvpr_chesapeake_landcover.zip"
),
root,
)
shutil.copy(
os.path.join(
"tests",
"data",
"chesapeake",
"cvpr",
"cvpr_chesapeake_landcover_prior_extension.zip",
),
root,
)
ChesapeakeCVPR(root)
def test_not_downloaded(self, tmp_path: Path) -> None:
with pytest.raises(RuntimeError, match="Dataset not found"):
ChesapeakeCVPR(str(tmp_path), checksum=True)
def test_out_of_bounds_query(self, dataset: ChesapeakeCVPR) -> None:
query = BoundingBox(0, 0, 0, 0, 0, 0)
with pytest.raises(
IndexError, match="query: .* not found in index with bounds:"
):
dataset[query]
def test_multiple_hits_query(self, dataset: ChesapeakeCVPR) -> None:
ds = ChesapeakeCVPR(
root=dataset.root, splits=["de-train", "de-test"], layers=dataset.layers
)
with pytest.raises(
IndexError, match="query: .* spans multiple tiles which is not valid"
):
ds[dataset.bounds]
|
the-stack_106_15626
|
import pandas as pd
from sklearn import preprocessing
from etna.transforms.base import Transform
class SegmentEncoderTransform(Transform):
"""Encode segment label to categorical. Creates column 'regressor_segment_code'."""
idx = pd.IndexSlice
def __init__(self):
self._le = preprocessing.LabelEncoder()
def fit(self, df: pd.DataFrame) -> "SegmentEncoderTransform":
"""
Fit encoder on existing segment labels.
Parameters
----------
df:
dataframe with data to fit label encoder.
Returns
-------
self
"""
segment_columns = df.columns.get_level_values("segment")
self._le.fit(segment_columns)
return self
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Get encoded (categorical) for each segment.
Parameters
----------
df:
dataframe with data to transform.
Returns
-------
result dataframe
"""
encoded_matrix = self._le.transform(self._le.classes_)
encoded_matrix = encoded_matrix.reshape(len(self._le.classes_), -1).repeat(len(df), axis=1).T
encoded_df = pd.DataFrame(
encoded_matrix,
columns=pd.MultiIndex.from_product(
[self._le.classes_, ["regressor_segment_code"]], names=("segment", "feature")
),
index=df.index,
)
encoded_df = encoded_df.astype("category")
df = df.join(encoded_df)
df = df.sort_index(axis=1)
return df
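# A minimal usage sketch (illustration only; the segment names below are made up).
# The transform expects a wide dataframe with MultiIndex columns
# ("segment", "feature") and appends a categorical "regressor_segment_code"
# column for every segment.
if __name__ == "__main__":
    timestamps = pd.date_range("2021-01-01", periods=3)
    columns = pd.MultiIndex.from_product(
        [["segment_a", "segment_b"], ["target"]], names=("segment", "feature")
    )
    wide_df = pd.DataFrame(
        [[1.0, 4.0], [2.0, 5.0], [3.0, 6.0]], index=timestamps, columns=columns
    )
    encoded = SegmentEncoderTransform().fit(wide_df).transform(wide_df)
    print(encoded["segment_a"]["regressor_segment_code"])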
|
the-stack_106_15627
|
import numpy as np
def sample_weighted(p_dict):
    # Keys are interpreted as sampling probabilities: pick a key with
    # probability proportional to its value and return the associated entry.
    ps = list(p_dict.keys())
    probs = np.array(ps, dtype=float)
    probs /= probs.sum()
    key = np.random.choice(ps, p=probs)
    return p_dict[key]
def move_bb(bbs, t):
"""
Translate the bounding-boxes in by t_x,t_y.
BB : 2x4xn
T : 2-long np.array
"""
return bbs + t[:, None, None]
def crop_safe(arr, rect, bbs=[], pad=0):
"""
ARR : arr to crop
RECT: (x,y,w,h) : area to crop to
BBS : nx4 xywh format bounding-boxes
PAD : percentage to pad
Does safe cropping. Returns the cropped rectangle and
the adjusted bounding-boxes
"""
rect = np.array(rect)
rect[:2] -= pad
rect[2:] += 2 * pad
v0 = [max(0, rect[0]), max(0, rect[1])]
v1 = [
min(arr.shape[0], rect[0] + rect[2]),
min(arr.shape[1], rect[1] + rect[3])
]
arr = arr[v0[0]:v1[0], v0[1]:v1[1], ...]
if len(bbs) > 0:
for i in range(len(bbs)):
bbs[i, 0] -= v0[0]
bbs[i, 1] -= v0[1]
return arr, bbs
else:
return arr
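# Quick usage sketch (hypothetical sizes, not part of the original module): crop a
# 100x100 array to a 40x40 window with 5 px of padding and shift one bounding-box
# into the cropped coordinate frame.
if __name__ == "__main__":
    img = np.zeros((100, 100), dtype=np.uint8)
    bbs = np.array([[30, 30, 10, 10]], dtype=np.int32)   # xywh
    cropped, shifted = crop_safe(img, (20, 20, 40, 40), bbs, pad=5)
    print(cropped.shape, shifted[0])   # (50, 50) [15 15 10 10]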
|
the-stack_106_15631
|
import os
import pandas as pd
from glob import glob
def get_options():
import argparse
parser = argparse.ArgumentParser(
description='Creating training on heatmaps')
parser.add_argument('--input_table',
metavar="str", type=str,
help='original xml table')
parser.add_argument('--folder_to_check',
metavar="str", type=str,
help='folder to compare with')
parser.add_argument('--output_table',
metavar="int", type=str,
help='output csv table name')
args = parser.parse_args()
return args
FEATURES = ['Biopsy',
'Index mitotique',
'Grade EE',
'% stroma',
'% cellules tumorales (dont CIS)',
'cis/carcinome total',
'%TIL',
'% Stromal lymphocytes',
'RCB',
'RCB class'
]
def load_custom_xlsx(path):
sheet_name = 'TRIPLE NEGATIF'
    df_features = pd.read_excel(path, sheet_name=sheet_name)[FEATURES]
df_features["Grade EE 1"] = (df_features['Grade EE'] == 1).astype(int)
df_features["Grade EE 2"] = (df_features['Grade EE'] == 2).astype(int)
df_features["Grade EE 3"] = (df_features['Grade EE'] == 3).astype(int)
df_features = df_features.drop("Grade EE", axis=1)
df_features = df_features.dropna()
# remove patient with string in numeric column
df_features = df_features.drop(32).reset_index()
df_features['%TIL'] = df_features['%TIL'].astype('int')
df_features = df_features.drop('index', axis=1)
df_features = df_features.set_index('Biopsy')
return df_features
def split(stri):
try:
name = int(os.path.basename(stri).split('.')[0])
except:
name = os.path.basename(stri).split('.')[0]
return name
def f(val):
if val == "pCR":
res = 0
elif val == "RCB-I":
res = 1
elif val == "RCB-II":
res = 2
else:
res = 3
return res
def main():
options = get_options()
table = load_custom_xlsx(options.input_table)
which_tiff_exist = [split(f) for f in glob(options.folder_to_check + '/*.tiff')]
table.columns = ["index_mitotique", "stroma", "cancer", "cis", "til", "stroma_lym", 'RCB', 'RCB_class', "grade_1", "grade_2", "grade_3"]
table = table[["index_mitotique", "stroma", "cancer", "cis", "til", "stroma_lym", "grade_1", "grade_2", "grade_3", 'RCB', 'RCB_class']]
table['RCB_class'] = table[["RCB_class"]].apply(lambda row: f(row["RCB_class"]), axis=1)
    table.loc[which_tiff_exist].to_csv(options.output_table)
if __name__ == '__main__':
main()
|
the-stack_106_15632
|
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
# initialize the camera and grab a reference to the raw camera capture
RESOLUTION = (640, 320)
def livestream():
camera = PiCamera()
camera.resolution = RESOLUTION
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=RESOLUTION)
# allow the camera to warmup
time.sleep(0.1)
# capture frames from the camera
for frame in camera.capture_continuous(
rawCapture, format="bgr", use_video_port=True
):
        # grab the raw NumPy array representing the image
image = frame.array
# show the frame
cv2.imshow("Frame", image)
key = cv2.waitKey(1) & 0xFF
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
if __name__ == "__main__":
livestream()
|
the-stack_106_15633
|
"""
Fenced Code Extension for Python Markdown
=========================================
This extension adds Fenced Code Blocks to Python-Markdown.
See <https://Python-Markdown.github.io/extensions/fenced_code_blocks>
for documentation.
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from textwrap import dedent
from . import Extension
from ..preprocessors import Preprocessor
from .codehilite import CodeHilite, CodeHiliteExtension, parse_hl_lines
from .attr_list import get_attrs, AttrListExtension
from ..util import parseBoolValue
import re
class FencedCodeExtension(Extension):
def __init__(self, **kwargs):
self.config = {
'lang_prefix': ['language-', 'Prefix prepended to the language. Default: "language-"']
}
super().__init__(**kwargs)
def extendMarkdown(self, md):
""" Add FencedBlockPreprocessor to the Markdown instance. """
md.registerExtension(self)
md.preprocessors.register(FencedBlockPreprocessor(md, self.getConfigs()), 'fenced_code_block', 25)
class FencedBlockPreprocessor(Preprocessor):
FENCED_BLOCK_RE = re.compile(
dedent(r'''
(?P<fence>^(?:~{3,}|`{3,}))[ ]* # opening fence
((\{(?P<attrs>[^\}\n]*)\})| # (optional {attrs} or
(\.?(?P<lang>[\w#.+-]*)[ ]*)? # optional (.)lang
(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot)[ ]*)?) # optional hl_lines)
\n # newline (end of opening fence)
(?P<code>.*?)(?<=\n) # the code block
(?P=fence)[ ]*$ # closing fence
'''),
re.MULTILINE | re.DOTALL | re.VERBOSE
)
def __init__(self, md, config):
super().__init__(md)
self.config = config
self.checked_for_deps = False
self.codehilite_conf = {}
self.use_attr_list = False
# List of options to convert to bool values
self.bool_options = [
'linenums',
'guess_lang',
'noclasses',
'use_pygments'
]
def run(self, lines):
""" Match and store Fenced Code Blocks in the HtmlStash. """
# Check for dependent extensions
if not self.checked_for_deps:
for ext in self.md.registeredExtensions:
if isinstance(ext, CodeHiliteExtension):
self.codehilite_conf = ext.getConfigs()
if isinstance(ext, AttrListExtension):
self.use_attr_list = True
self.checked_for_deps = True
text = "\n".join(lines)
while 1:
m = self.FENCED_BLOCK_RE.search(text)
if m:
lang, id, classes, config = None, '', [], {}
if m.group('attrs'):
id, classes, config = self.handle_attrs(get_attrs(m.group('attrs')))
if len(classes):
lang = classes.pop(0)
else:
if m.group('lang'):
lang = m.group('lang')
if m.group('hl_lines'):
# Support hl_lines outside of attrs for backward-compatibility
config['hl_lines'] = parse_hl_lines(m.group('hl_lines'))
# If config is not empty, then the codehighlite extension
# is enabled, so we call it to highlight the code
if self.codehilite_conf and self.codehilite_conf['use_pygments'] and config.get('use_pygments', True):
local_config = self.codehilite_conf.copy()
local_config.update(config)
# Combine classes with cssclass. Ensure cssclass is at end
# as pygments appends a suffix under certain circumstances.
# Ignore ID as Pygments does not offer an option to set it.
if classes:
local_config['css_class'] = '{} {}'.format(
' '.join(classes),
local_config['css_class']
)
highliter = CodeHilite(
m.group('code'),
lang=lang,
style=local_config.pop('pygments_style', 'default'),
**local_config
)
code = highliter.hilite()
else:
id_attr = lang_attr = class_attr = kv_pairs = ''
if lang:
lang_attr = ' class="{}{}"'.format(self.config.get('lang_prefix', 'language-'), lang)
if classes:
class_attr = ' class="{}"'.format(' '.join(classes))
if id:
id_attr = ' id="{}"'.format(id)
if self.use_attr_list and config and not config.get('use_pygments', False):
# Only assign key/value pairs to code element if attr_list ext is enabled, key/value pairs
# were defined on the code block, and the `use_pygments` key was not set to True. The
# `use_pygments` key could be either set to False or not defined. It is omitted from output.
kv_pairs = ' ' + ' '.join(
'{k}="{v}"'.format(k=k, v=v) for k, v in config.items() if k != 'use_pygments'
)
code = '<pre{id}{cls}><code{lang}{kv}>{code}</code></pre>'.format(
id=id_attr,
cls=class_attr,
lang=lang_attr,
kv=kv_pairs,
code=self._escape(m.group('code'))
)
placeholder = self.md.htmlStash.store(code)
text = '{}\n{}\n{}'.format(text[:m.start()],
placeholder,
text[m.end():])
else:
break
return text.split("\n")
def handle_attrs(self, attrs):
""" Return tuple: (id, [list, of, classes], {configs}) """
id = ''
classes = []
configs = {}
for k, v in attrs:
if k == 'id':
id = v
elif k == '.':
classes.append(v)
elif k == 'hl_lines':
configs[k] = parse_hl_lines(v)
elif k in self.bool_options:
configs[k] = parseBoolValue(v, fail_on_errors=False, preserve_none=True)
else:
configs[k] = v
return id, classes, configs
def _escape(self, txt):
""" basic html escaping """
txt = txt.replace('&', '&')
txt = txt.replace('<', '<')
txt = txt.replace('>', '>')
txt = txt.replace('"', '"')
return txt
def makeExtension(**kwargs): # pragma: no cover
return FencedCodeExtension(**kwargs)
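# Usage sketch (kept as a comment because this module uses relative imports and is
# normally loaded through the `markdown` package rather than run directly):
#
#     import markdown
#     html = markdown.markdown("```python\nprint('hi')\n```",
#                              extensions=["fenced_code"])
#
# With the extension enabled, the fenced block is rendered as a
# <pre><code class="language-python">...</code></pre> element.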
|
the-stack_106_15634
|
import json
import logging
import string
from datetime import datetime
from random import choices
from .agent import Agent
class Manager:
def __init__(self, world, agent_paths=None, agents=None):
self.world = world
self._replay_enable = True
self._replay_filename = None
self._tick = 1
self._stop = False
self._set_replay_file()
if agents:
self.agents = agents
else:
self.agents = [
Agent(agent_path, time_config=world.initial_config)
for agent_path in agent_paths
]
def _set_replay_file(self):
if not self._replay_enable:
return
now = datetime.now()
random_string = "".join(
choices(
string.ascii_lowercase + string.ascii_uppercase + string.digits, k=6
)
)
random_part = "_".join([now.strftime("%y%m%d_%H%M%S"), random_string])
self._replay_filename = f"replay_{self.world.name}_{random_part}.jsonl"
def start(self):
for agent in self.agents:
agent.start()
self.world.register_agent(agent)
agent.ping()
agent.set_config(self.world.config)
self._check_for_tainted_agents()
logging.info("started")
def ping(self):
for agent in self.agents:
agent.ping()
self._check_for_tainted_agents()
logging.info("ping completed")
def loop(self):
while not self.world.finished:
self.tick()
if self._check_for_tainted_agents():
break
def tick(self):
if self.world.config["update_mode"] == "ALTERNATING":
# Participants play in alternating order, like chess
self._tick_alternating()
elif self.world.config["update_mode"] == "SIMULTANEOUS":
# Participants all play at the same time
self._tick_simultaneous()
elif self.world.config["update_mode"] == "ISOLATED":
# Participants play independent and isolated game instances
# Example: Two agents play tetris and the one with the highest score wins
self._tick_isolated()
else:
raise ValueError(
f"{self.world.config['update_mode']} is not a valid update mode"
)
logging.info(f"tick {self._tick}")
self._tick += 1
def _tick_alternating(self):
world_state = self.world.state
world_state["epoch"] = self._tick
world_state["agent_ids"] = [agent.id for agent in self.agents]
agent_to_update = self._get_agent(self.world.agent_to_move)
agent_to_update.update_state(world_state)
agent_actions = [agent_to_update.get_actions()]
self._save_replay(world_state, agent_actions)
self.world.update(agent_actions)
def _tick_simultaneous(self):
world_state = self.world.state
world_state["epoch"] = self._tick
world_state["agent_ids"] = [agent.id for agent in self.agents]
for agent in self.agents:
agent.update_state(world_state)
agent_actions = [agent.get_actions() for agent in self.agents]
self._save_replay(world_state, agent_actions)
self.world.update(agent_actions)
def _tick_isolated(self):
world_state = self.world.state
base_state = {}
base_state["epoch"] = self._tick
base_state["agent_ids"] = [agent.id for agent in self.agents]
agent_states = world_state.pop("state_by_agent")
for agent in self.agents:
agent_state = {**base_state, **agent_states[agent.id]}
agent.update_state(agent_state)
agent_actions = [agent.get_actions() for agent in self.agents]
self._save_replay(world_state, agent_actions)
self.world.update(agent_actions)
def stop(self):
for agent in self.agents:
agent.stop()
logging.info("stopped")
@property
def results(self):
return {
"scores": self.scores,
"replay_file": self._replay_filename,
"outcome": self.world.outcome,
"has_tainted_agent": self.has_tainted_agent,
}
@property
def scores(self):
scores = []
for agent_id, score in self.world.scores.items():
agent = self._get_agent(agent_id)
scores.append(
{
"name": agent.name,
"version": agent.version,
"score": score,
"agent_id": agent_id,
"agent_path": agent.agent_path,
"tainted": agent.tainted,
"tainted_reason": agent.tainted_reason,
}
)
return sorted(scores, key=lambda x: x["score"], reverse=True)
@property
def has_tainted_agent(self):
return len(self.tainted_agents) > 0
@property
def tainted_agents(self):
return [agent for agent in self.agents if agent.tainted]
def _check_for_tainted_agents(self):
if not self.has_tainted_agent:
return False
self._stop = True
return True
def _save_replay(self, world_state, agent_actions):
if not self._replay_enable:
return
data = {
"game_config": self.world.config,
"epoch": self._tick,
"max_epoch": self.world.config["n_epochs"],
"world_state": world_state,
"agent_actions": agent_actions,
"agent_ids": [agent.id for agent in self.agents],
}
with open(self._replay_filename, "at") as f:
f.write(json.dumps(data))
f.write("\n")
def _get_agent(self, id):
return next((agent for agent in self.agents if agent.id == id), None)
|
the-stack_106_15635
|
# -*- coding: utf8 -*-fr
# pylint: disable=too-many-instance-attributes,invalid-name, too-many-statements
"""
ItopapiStorageSystem is an abstraction of StorageSystem representation on iTop
"""
from itopapi.model.prototype import ItopapiPrototype
from itopapi.model.datacenterDevice import ItopapiDatacenterDevice
from itopapi.model.features.hasOrganization import HasOrganization
from itopapi.model.features.hasLocation import HasLocation
from itopapi.model.features.hasBrand import HasBrand
from itopapi.model.features.hasModel import HasModel
from itopapi.model.features.hasRack import HasRack
from itopapi.model.features.hasEnclosure import HasEnclosure
from itopapi.model.features.hasPowerA import HasPowerA
from itopapi.model.features.hasPowerB import HasPowerB
__version__ = '1.0'
__authors__ = ['Julien Nauroy <[email protected]>']
class ItopapiStorageSystem(ItopapiDatacenterDevice, HasOrganization, HasLocation, HasBrand, HasModel,
HasRack, HasEnclosure, HasPowerA, HasPowerB):
"""
ItopapiStorageSystem is an object that represents a StorageSystem from iTop
"""
# Configuration specific to itop
itop = {
# Name of the class in Itop
'name': 'StorageSystem',
# Define which fields to save when creating or updating from the python API
'save': ['name', 'status', 'business_criticity', 'managementip',
'nb_u', 'serialnumber', 'asset_number', 'move2production',
'purchase_date', 'end_of_warranty', 'description'],
'foreign_keys': [
HasOrganization.foreign_key,
HasLocation.foreign_key,
HasBrand.foreign_key,
HasModel.foreign_key,
HasRack.foreign_key,
HasEnclosure.foreign_key,
HasPowerA.foreign_key,
HasPowerB.foreign_key,
],
'list_types': {
'contacts_list': 'Person',
},
}
@staticmethod
def find(key):
""" Retrieve one or mor instance of StorageSystem with the given key or criteria """
return ItopapiPrototype.find(ItopapiStorageSystem, key)
@staticmethod
def find_by_name(name):
return ItopapiPrototype.find_by_name(ItopapiStorageSystem, name)
@staticmethod
def find_all():
""" Retrieve all instance of StorageSystem """
return ItopapiPrototype.find_all(ItopapiStorageSystem)
def __init__(self, data=None):
super(ItopapiStorageSystem, self).__init__(data)
# StorageSystem's status. Values within [implementation, obsolete, production, stock]
self.status = None
# StorageSystem's business criticity. Values within [high, medium, low]
self.business_criticity = None
self.managementip = None
# Rack units
self.nb_u = None
# StorageSystem's serial number
self.serialnumber = None
# StorageSystem's asset number
self.asset_number = None
# StorageSystem's move to production date
self.move2production = None
# StorageSystem's purchase date
self.purchase_date = None
# StorageSystem's end of warranty date
self.end_of_warranty = None
# StorageSystem's description, as a free text
self.description = None
##############################
# Lists #
##############################
# StorageSystem's softwares list
self.softwares_list = {}
# StorageSystem's contacts list
self.contacts_list = {}
# StorageSystem's documents list
self.documents_list = {}
# StorageSystem's tickets list
self.tickets_list = {}
# StorageSystem's application solutions list
self.applicationsolution_list = {}
# StorageSystem's network interfaces list
self.physicalinterface_list = {}
# StorageSystem's FC ports list
self.fiberinterfacelist_list = {}
# StorageSystem's network devices list
self.networkdevice_list = {}
# StorageSystem's SANs list
self.san_list = {}
# StorageSystem's provider contracts list
self.providercontracts_list = {}
# StorageSystem's services list
self.services_list = {}
# LogicalVolume's list
self.logicalvolume_list = {}
# Register as a subclass of Datacenter Device
ItopapiDatacenterDevice.register(ItopapiStorageSystem)
|
the-stack_106_15637
|
# coding:utf-8
from __future__ import print_function
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "7"
import math
import time
import torch
torch.multiprocessing.set_sharing_strategy('file_system')
import datetime
import argparse
import torch.optim as optim
import horovod.torch as hvd
import torch.utils.data as data
import torch.backends.cudnn as cudnn
from layers.modules import MultiBoxLoss
from models.retinaface import RetinaFace
from layers.functions.prior_box import PriorBox
# wider face dataset for training on wider face
# from data import WiderFaceDetection, detection_collate, preproc, cfg_mnet, cfg_re50
# use for internal data
from data import CommonFaceDetectionDataSet, detection_collate, preproc, cfg_mnet, cfg_re50
from tensorboardX import SummaryWriter
parser = argparse.ArgumentParser(description='Retinaface Training')
parser.add_argument('--training_dataset',
default='/data/remote/dataset/wider_face/widerface/train/label.txt', help='Training dataset directory')
parser.add_argument('--network', default='mobile0.25',
help='Backbone network mobile0.25 or resnet50')
parser.add_argument('--num_workers', default=8, type=int,
help='Number of workers used in dataloading')
parser.add_argument('--lr', '--learning-rate', default=1e-3,
type=float, help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--resume_net', default=None,
help='resume net for retraining')
parser.add_argument('--resume_epoch', default=0, type=int,
help='resume iter for retraining')
parser.add_argument('--weight_decay', default=5e-4,
type=float, help='Weight decay for SGD')
parser.add_argument('--gamma', default=0.1, type=float,
help='Gamma update for SGD')
parser.add_argument('--save_folder', default='/data/remote/github_code/face_detection/Pytorch_Retinaface/checkpoints',
help='Location to save checkpoint models')
parser.add_argument('--pretrain', default=0, help='widerface pretrain models')
parser.add_argument('--pretrain_model', default="/data/remote/github_code/face_detection/Pytorch_Retinaface/weights/mobilenet0.25_Final.pth", help="widerface pretrain checkpoints")
parser.add_argument('--log_writer', default=1, help="write the training log")
parser.add_argument('--log_dir', default="/data/remote/code/sex_image_classification/output_dir/output_log/2020_4_13_sample_train_logdir", help="tensorboard log directory")
args = parser.parse_args()
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
cfg = None
if args.network == "mobile0.25":
cfg = cfg_mnet
elif args.network == "resnet50":
cfg = cfg_re50
rgb_mean = (104, 117, 123) # bgr order
num_classes = 2
img_dim = cfg['image_size']
num_gpu = cfg['ngpu']
batch_size = cfg['batch_size']
max_epoch = cfg['epoch']
gpu_train = cfg['gpu_train']
num_workers = args.num_workers
momentum = args.momentum
weight_decay = args.weight_decay
initial_lr = args.lr
gamma = args.gamma
training_dataset = args.training_dataset
save_folder = args.save_folder
# model
net = RetinaFace(cfg=cfg)
print("Printing net...")
print(net)
if args.resume_net is not None:
print('Loading resume network...')
state_dict = torch.load(args.resume_net)
# create new OrderedDict that does not contain `module.`
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
head = k[:7]
if head == 'module.':
name = k[7:] # remove `module.`
else:
name = k
new_state_dict[name] = v
net.load_state_dict(new_state_dict)
if args.pretrain:
print("Loading the pretrain model!!!")
state_dict = torch.load(args.pretrain_model, map_location="cpu")
net.load_state_dict(state_dict)
print("Load the pretrain model Finish!!!")
# need to change the hvd or ddp
if num_gpu > 1 and gpu_train:
net = torch.nn.DataParallel(net).cuda()
else:
net = net.cuda()
cudnn.benchmark = True
optimizer = optim.SGD(net.parameters(), lr=initial_lr,
momentum=momentum, weight_decay=weight_decay)
# loss function
criterion = MultiBoxLoss(num_classes, 0.35, True, 0, True, 7, 0.35, False)
# anchor box
priorbox = PriorBox(cfg, image_size=(img_dim, img_dim))
# generate the anchor
with torch.no_grad():
priors = priorbox.forward()
priors = priors.cuda()
def train():
net.train()
epoch = 0 + args.resume_epoch
print('Loading Dataset...')
# prepare the dataset
dataset = CommonFaceDetectionDataSet(training_dataset, preproc(img_dim, rgb_mean))
# dataset = WiderFaceDetection(training_dataset, preproc(img_dim, rgb_mean))
print("dataset", len(dataset))
epoch_size = math.ceil(len(dataset) / batch_size)
max_iter = max_epoch * epoch_size
stepvalues = (cfg['decay1'] * epoch_size, cfg['decay2'] * epoch_size)
step_index = 0
if args.resume_epoch > 0:
start_iter = args.resume_epoch * epoch_size
else:
start_iter = 0
for iteration in range(start_iter, max_iter):
if iteration % epoch_size == 0:
# create batch iterator
batch_iterator = iter(data.DataLoader(
dataset, batch_size, shuffle=True, num_workers=num_workers, collate_fn=detection_collate))
if (epoch % 10 == 0 and epoch > 0) or (epoch % 5 == 0 and epoch > cfg['decay1']):
torch.save(net.state_dict(), save_folder + '/' +
cfg['name'] + '_epoch_' + str(epoch) + '.pth')
epoch += 1
load_t0 = time.time()
if iteration in stepvalues:
step_index += 1
lr = adjust_learning_rate(
optimizer, gamma, epoch, step_index, iteration, epoch_size)
# load train data
images, targets = next(batch_iterator)
images = images.cuda()
targets = [anno.cuda() for anno in targets]
# forward
out = net(images)
# backprop
optimizer.zero_grad()
loss_l, loss_c, loss_landm = criterion(out, priors, targets)
loss = cfg['loc_weight'] * loss_l + loss_c + loss_landm
loss.backward()
optimizer.step()
load_t1 = time.time()
batch_time = load_t1 - load_t0
eta = int(batch_time * (max_iter - iteration))
print('Epoch:{}/{} || Epochiter: {}/{} || Iter: {}/{} || Loc: {:.4f} Cla: {:.4f} Landm: {:.4f} || LR: {:.8f} || Batchtime: {:.4f} s || ETA: {}'
.format(epoch, max_epoch, (iteration % epoch_size) + 1,
epoch_size, iteration + 1, max_iter, loss_l.item(), loss_c.item(), loss_landm.item(), lr, batch_time, str(datetime.timedelta(seconds=eta))))
if args.log_writer:
record_log(log_writer, loss_l, loss_c, lr, iteration+1)
    torch.save(net.state_dict(), save_folder + '/' + cfg['name'] + '_Final.pth')
# torch.save(net.state_dict(), save_folder + 'Final_Retinaface.pth')
def adjust_learning_rate(optimizer, gamma, epoch, step_index, iteration, epoch_size):
"""Sets the learning rate
# Adapted from PyTorch Imagenet example:
# https://github.com/pytorch/examples/blob/master/imagenet/main.py
"""
warmup_epoch = -1
if epoch <= warmup_epoch:
lr = 1e-6 + (initial_lr-1e-6) * iteration / (epoch_size * warmup_epoch)
else:
lr = initial_lr * (gamma ** (step_index))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
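# Illustrative note (not executed anywhere): with the defaults initial_lr=1e-3 and
# gamma=0.1, the schedule above keeps lr at 1e-3 until the first decay step, then
# drops it to 1e-4 after cfg['decay1'] epochs and to 1e-5 after cfg['decay2'] epochs;
# the warmup branch is effectively disabled because warmup_epoch is -1.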
# add the tensorboard log
def record_log(log_writer, bbox_loss, class_loss, lr, batch_idx):
log_writer.add_scalar("train/bbox_loss", bbox_loss.data.item(), batch_idx)
log_writer.add_scalar("train/class_loss", class_loss.data.item(), batch_idx)
log_writer.add_scalar("learning_rate", lr, batch_idx)
if __name__ == '__main__':
log_writer = SummaryWriter(args.log_dir)
train()
|
the-stack_106_15638
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sequence-to-sequence model with an attention mechanism."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from book_code.chapter_05 import data_utils
class Seq2SeqModel(object):
"""Sequence-to-sequence model with attention and for multiple buckets.
This class implements a multi-layer recurrent neural network as encoder,
and an attention-based decoder. This is the same as the model described in
this paper: http://arxiv.org/abs/1412.7449 - please look there for details,
or into the seq2seq library for complete model implementation.
This class also allows to use GRU cells in addition to LSTM cells, and
sampled softmax to handle large output vocabulary size. A single-layer
version of this model, but with bi-directional encoder, was presented in
http://arxiv.org/abs/1409.0473
and sampled softmax is described in Section 3 of the following paper.
http://arxiv.org/abs/1412.2007
"""
def __init__(self, source_vocab_size, target_vocab_size, buckets, size,
num_layers, max_gradient_norm, batch_size, learning_rate,
learning_rate_decay_factor, use_lstm=False,
num_samples=512, forward_only=False):
"""Create the model.
Args:
source_vocab_size: size of the source vocabulary.
target_vocab_size: size of the target vocabulary.
buckets: a list of pairs (I, O), where I specifies maximum input length
that will be processed in that bucket, and O specifies maximum output
length. Training instances that have inputs longer than I or outputs
longer than O will be pushed to the next bucket and padded accordingly.
We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
size: number of units in each layer of the model.
num_layers: number of layers in the model.
max_gradient_norm: gradients will be clipped to maximally this norm.
batch_size: the size of the batches used during training;
the model construction is independent of batch_size, so it can be
changed after initialization if this is convenient, e.g., for decoding.
learning_rate: learning rate to start with.
learning_rate_decay_factor: decay learning rate by this much when needed.
use_lstm: if true, we use LSTM cells instead of GRU cells.
num_samples: number of samples for sampled softmax.
forward_only: if set, we do not construct the backward pass in the model.
"""
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
self.buckets = buckets
self.batch_size = batch_size
self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
self.learning_rate_decay_op = self.learning_rate.assign(
self.learning_rate * learning_rate_decay_factor)
self.global_step = tf.Variable(0, trainable=False)
# If we use sampled softmax, we need an output projection.
output_projection = None
softmax_loss_function = None
# Sampled softmax only makes sense if we sample less than vocabulary size.
if num_samples > 0 and num_samples < self.target_vocab_size:
with tf.device("/cpu:0"):
w = tf.get_variable("proj_w", [size, self.target_vocab_size])
w_t = tf.transpose(w)
b = tf.get_variable("proj_b", [self.target_vocab_size])
output_projection = (w, b)
def sampled_loss(inputs, labels):
with tf.device("/cpu:0"):
labels = tf.reshape(labels, [-1, 1])
return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, num_samples,
self.target_vocab_size)
softmax_loss_function = sampled_loss
# Create the internal multi-layer cell for our RNN.
single_cell = tf.nn.rnn_cell.GRUCell(size)
if use_lstm:
single_cell = tf.nn.rnn_cell.BasicLSTMCell(size)
cell = single_cell
if num_layers > 1:
cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers)
# The seq2seq function: we use embedding for the input and attention.
def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
return tf.nn.seq2seq.embedding_attention_seq2seq(
encoder_inputs, decoder_inputs, cell,
num_encoder_symbols=source_vocab_size,
num_decoder_symbols=target_vocab_size,
embedding_size=size,
output_projection=output_projection,
feed_previous=do_decode)
# Feeds for inputs.
self.encoder_inputs = []
self.decoder_inputs = []
self.target_weights = []
for i in xrange(buckets[-1][0]): # Last bucket is the biggest one.
self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="encoder{0}".format(i)))
for i in xrange(buckets[-1][1] + 1):
self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="decoder{0}".format(i)))
self.target_weights.append(tf.placeholder(tf.float32, shape=[None],
name="weight{0}".format(i)))
# Our targets are decoder inputs shifted by one.
targets = [self.decoder_inputs[i + 1]
for i in xrange(len(self.decoder_inputs) - 1)]
# Training outputs and losses.
if forward_only:
self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True),
softmax_loss_function=softmax_loss_function)
# If we use output projection, we need to project outputs for decoding.
if output_projection is not None:
for b in xrange(len(buckets)):
self.outputs[b] = [
tf.matmul(output, output_projection[0]) + output_projection[1]
for output in self.outputs[b]
]
else:
self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets,
lambda x, y: seq2seq_f(x, y, False),
softmax_loss_function=softmax_loss_function)
# Gradients and SGD update operation for training the model.
params = tf.trainable_variables()
if not forward_only:
self.gradient_norms = []
self.updates = []
opt = tf.train.GradientDescentOptimizer(self.learning_rate)
for b in xrange(len(buckets)):
gradients = tf.gradients(self.losses[b], params)
clipped_gradients, norm = tf.clip_by_global_norm(gradients,
max_gradient_norm)
self.gradient_norms.append(norm)
self.updates.append(opt.apply_gradients(
zip(clipped_gradients, params), global_step=self.global_step))
self.saver = tf.train.Saver(tf.all_variables())
def step(self, session, encoder_inputs, decoder_inputs, target_weights,
bucket_id, forward_only):
"""Run a step of the model feeding the given inputs.
Args:
session: tensorflow session to use.
encoder_inputs: list of numpy int vectors to feed as encoder inputs.
decoder_inputs: list of numpy int vectors to feed as decoder inputs.
target_weights: list of numpy float vectors to feed as target weights.
bucket_id: which bucket of the model to use.
forward_only: whether to do the backward step or only forward.
Returns:
A triple consisting of gradient norm (or None if we did not do backward),
average perplexity, and the outputs.
Raises:
ValueError: if length of encoder_inputs, decoder_inputs, or
target_weights disagrees with bucket size for the specified bucket_id.
"""
# Check if the sizes match.
encoder_size, decoder_size = self.buckets[bucket_id]
if len(encoder_inputs) != encoder_size:
raise ValueError("Encoder length must be equal to the one in bucket,"
" %d != %d." % (len(encoder_inputs), encoder_size))
if len(decoder_inputs) != decoder_size:
raise ValueError("Decoder length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_inputs), decoder_size))
if len(target_weights) != decoder_size:
raise ValueError("Weights length must be equal to the one in bucket,"
" %d != %d." % (len(target_weights), decoder_size))
# Input feed: encoder inputs, decoder inputs, target_weights, as provided.
input_feed = {}
for l in xrange(encoder_size):
input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]
for l in xrange(decoder_size):
input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]
input_feed[self.target_weights[l].name] = target_weights[l]
# Since our targets are decoder inputs shifted by one, we need one more.
last_target = self.decoder_inputs[decoder_size].name
input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)
# Output feed: depends on whether we do a backward step or not.
if not forward_only:
output_feed = [self.updates[bucket_id], # Update Op that does SGD.
self.gradient_norms[bucket_id], # Gradient norm.
self.losses[bucket_id]] # Loss for this batch.
else:
output_feed = [self.losses[bucket_id]] # Loss for this batch.
for l in xrange(decoder_size): # Output logits.
output_feed.append(self.outputs[bucket_id][l])
outputs = session.run(output_feed, input_feed)
if not forward_only:
return outputs[1], outputs[2], None # Gradient norm, loss, no outputs.
else:
return None, outputs[0], outputs[1:] # No gradient norm, loss, outputs.
def get_batch(self, data, bucket_id):
"""Get a random batch of data from the specified bucket, prepare for step.
To feed data in step(..) it must be a list of batch-major vectors, while
data here contains single length-major cases. So the main logic of this
function is to re-index data cases to be in the proper format for feeding.
Args:
data: a tuple of size len(self.buckets) in which each element contains
lists of pairs of input and output data that we use to create a batch.
bucket_id: integer, which bucket to get the batch for.
Returns:
The triple (encoder_inputs, decoder_inputs, target_weights) for
the constructed batch that has the proper format to call step(...) later.
"""
encoder_size, decoder_size = self.buckets[bucket_id]
encoder_inputs, decoder_inputs = [], []
# Get a random batch of encoder and decoder inputs from data,
# pad them if needed, reverse encoder inputs and add GO to decoder.
for _ in xrange(self.batch_size):
encoder_input, decoder_input = random.choice(data[bucket_id])
# Encoder inputs are padded and then reversed.
encoder_pad = [data_utils.PAD_ID] * (encoder_size - len(encoder_input))
encoder_inputs.append(list(reversed(encoder_input + encoder_pad)))
# Decoder inputs get an extra "GO" symbol, and are padded then.
decoder_pad_size = decoder_size - len(decoder_input) - 1
decoder_inputs.append([data_utils.GO_ID] + decoder_input +
[data_utils.PAD_ID] * decoder_pad_size)
# Now we create batch-major vectors from the data selected above.
batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []
# Batch encoder inputs are just re-indexed encoder_inputs.
for length_idx in xrange(encoder_size):
batch_encoder_inputs.append(
np.array([encoder_inputs[batch_idx][length_idx]
for batch_idx in xrange(self.batch_size)], dtype=np.int32))
# Batch decoder inputs are re-indexed decoder_inputs, we create weights.
for length_idx in xrange(decoder_size):
batch_decoder_inputs.append(
np.array([decoder_inputs[batch_idx][length_idx]
for batch_idx in xrange(self.batch_size)], dtype=np.int32))
# Create target_weights to be 0 for targets that are padding.
batch_weight = np.ones(self.batch_size, dtype=np.float32)
for batch_idx in xrange(self.batch_size):
# We set weight to 0 if the corresponding target is a PAD symbol.
# The corresponding target is decoder_input shifted by 1 forward.
if length_idx < decoder_size - 1:
target = decoder_inputs[batch_idx][length_idx + 1]
if length_idx == decoder_size - 1 or target == data_utils.PAD_ID:
batch_weight[batch_idx] = 0.0
batch_weights.append(batch_weight)
return batch_encoder_inputs, batch_decoder_inputs, batch_weights
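# Hedged usage sketch (added for illustration; not part of the original module).
# It assumes the legacy tf.Session API used above and a `train_set` that is
# already bucketed as described in get_batch(); every size below is a placeholder.
def _example_training_step(session, train_set):
  buckets = [(5, 10), (10, 15)]
  model = Seq2SeqModel(source_vocab_size=40000, target_vocab_size=40000,
                       buckets=buckets, size=256, num_layers=2,
                       max_gradient_norm=5.0, batch_size=64,
                       learning_rate=0.5, learning_rate_decay_factor=0.99)
  session.run(tf.initialize_all_variables())
  # sample a batch from bucket 0 and run a single gradient update
  encoder_inputs, decoder_inputs, target_weights = model.get_batch(train_set, 0)
  _, loss, _ = model.step(session, encoder_inputs, decoder_inputs,
                          target_weights, bucket_id=0, forward_only=False)
  return loss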
|
the-stack_106_15640
|
from __future__ import print_function
from dogapi.common import is_p3k
get_input = input
if is_p3k():
import configparser
else:
get_input = raw_input
import ConfigParser as configparser
import os
import sys
try:
from UserDict import IterableUserDict
except ImportError:
from collections import UserDict as IterableUserDict
from dogapi import DogHttpApi
def print_err(msg):
    # callers supply their own 'ERROR:'/'WARNING:' prefix, so just forward msg to stderr
    if is_p3k():
        print(msg, file=sys.stderr)
    else:
        sys.stderr.write(msg + '\n')
def report_errors(res):
if 'errors' in res:
for e in res['errors']:
print_err('ERROR: ' + e)
sys.exit(1)
return False
def report_warnings(res):
if 'warnings' in res:
for e in res['warnings']:
print_err('WARNING: ' + e)
return True
return False
class CommandLineClient(object):
def __init__(self, config):
self.config = config
self._dog = None
@property
def dog(self):
if not self._dog:
self._dog = DogHttpApi(self.config['apikey'], self.config['appkey'], swallow=True, json_responses=True)
return self._dog
class DogshellConfig(IterableUserDict):
def load(self, config_file, apikey, appkey):
config = configparser.ConfigParser()
if apikey is not None and appkey is not None:
self['apikey'] = apikey
self['appkey'] = appkey
else:
if os.access(config_file, os.F_OK):
config.read(config_file)
if not config.has_section('Connection'):
report_errors({'errors': ['%s has no [Connection] section' % config_file]})
else:
try:
response = ''
while response.strip().lower() not in ['y', 'n']:
response = get_input('%s does not exist. Would you like to create it? [Y/n] ' % config_file)
if response.strip().lower() in ['', 'y', 'yes']:
# Read the api and app keys from stdin
apikey = get_input('What is your api key? (Get it here: https://app.datadoghq.com/account/settings#api) ')
appkey = get_input('What is your application key? (Generate one here: https://app.datadoghq.com/account/settings#api) ')
# Write the config file
config.add_section('Connection')
config.set('Connection', 'apikey', apikey)
config.set('Connection', 'appkey', appkey)
f = open(config_file, 'w')
config.write(f)
f.close()
print('Wrote %s' % config_file)
elif response.strip().lower() == 'n':
# Abort
print_err('Exiting\n')
sys.exit(1)
except KeyboardInterrupt:
# Abort
print_err('\nExiting')
sys.exit(1)
self['apikey'] = config.get('Connection', 'apikey')
self['appkey'] = config.get('Connection', 'appkey')
assert self['apikey'] is not None and self['appkey'] is not None
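# Hedged usage sketch (added for illustration): ~/.dogrc is assumed to be the
# conventional dogshell config location; pass explicit keys to skip the prompt.
def _example_client(config_file=os.path.expanduser('~/.dogrc')):
    config = DogshellConfig()
    config.load(config_file, apikey=None, appkey=None)
    return CommandLineClient(config).dog  # lazily constructed DogHttpApi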
|
the-stack_106_15644
|
import csv
import tabula
from Exceptions_stravenkovac import FormatIsInappropriate
from parser_interface import ParserInterface
import os
import re
import pandas as pd
class MonthlyHoursParser(ParserInterface):
def __init__(self, path, path_to):
self.initial_path = path
with open(path_to, 'a'):
pass
self.path_to_csv = path_to
self.filename = os.path.basename(self.initial_path)
def __is_float(self, string):
try:
float(string)
return True
except ValueError:
return False
def load_data_source(self):
# tabula.convert_into(self.initial_path, self.path_to_csv, output_format="csv", pages=[1], stream=True)
data_to_csv = pd.DataFrame()
name_and_date = tabula.read_pdf(self.initial_path, area=[0, 0, 150, 800], pages='all', stream=True) #, java_options ="-Dfile.encoding=UTF16"
# print(name_and_date)
# print(type(name_and_date[0]))
for dataframe in name_and_date:
data_to_csv = data_to_csv.append(dataframe)
print(data_to_csv)
data_to_csv.to_csv(self.path_to_csv)
def extract_data(self):
list_of_days = []
list_of_working_hours = []
name_str = None
counter = 0
time_str = "TIME"
hours_str = "Hours worked"
with open(self.path_to_csv, 'r') as read_obj:
while True:
line = read_obj.readline()
if len(line) == 0:
break
if time_str in line:
list_of_days = [s for s in re.split(",| ", line) if (self.__is_float(s) or s.isdigit())]
print(list_of_days, len(list_of_days))
if hours_str in line:
try:
list_of_working_hours = [s for s in re.split(",| ", line.split(')')[1]) if (self.__is_float(s) or s.isdigit())]
print(list_of_working_hours, len(list_of_working_hours))
except IndexError:
print("Could not parse the file {} correctly. Try to obtain/calculate the data by your own.".format(self.filename))
# for int_character in hours_str:
# if self.__is_float(int_character) or int_character.isdigit():
# counter += 1
# if counter != 0:
# list_of_working_hours = [s for s in re.split(",| ", line.split(')')[1]) if (self.__is_float(s) or s.isdigit())]
# print(list_of_working_hours, len(list_of_working_hours))
# else:
# error_format = "Could not parse the file {} correctly. Try to obtain/calculate the data by your own.".format(
# self.filename)
# raise Exception(error_format)
# line = read_obj.readline()
# matches = re.findall(r'\"(.+?)\"', line) #new_line = line.split("\"")
# print(matches)
# list_of_working_hours = [s for s in re.split(",| ", line) if (self.__is_float(s) or s.isdigit())]
# print(list_of_working_hours, len(list_of_working_hours))
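# Hedged usage sketch (added for illustration; the file names are placeholders).
def _example_usage(pdf_path="monthly_hours.pdf", csv_path="monthly_hours.csv"):
    parser = MonthlyHoursParser(pdf_path, csv_path)
    parser.load_data_source()   # dump the PDF tables into the CSV via tabula
    parser.extract_data()       # parse day numbers and worked hours from that CSV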
|
the-stack_106_15648
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import io
import os
import tempfile
import unittest.mock
from contextlib import redirect_stdout
from airflow import models
from airflow.cli import cli_parser
from airflow.cli.commands import variable_command
from airflow.models import Variable
from tests.test_utils.db import clear_db_variables
class TestCliVariables(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dagbag = models.DagBag(include_examples=True)
cls.parser = cli_parser.get_parser()
def setUp(self):
clear_db_variables()
def tearDown(self):
clear_db_variables()
def test_variables_set(self):
"""Test variable_set command"""
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'foo', 'bar']))
self.assertIsNotNone(Variable.get("foo"))
self.assertRaises(KeyError, Variable.get, "foo1")
def test_variables_get(self):
Variable.set('foo', {'foo': 'bar'}, serialize_json=True)
with redirect_stdout(io.StringIO()) as stdout:
variable_command.variables_get(self.parser.parse_args(['variables', 'get', 'foo']))
self.assertEqual('{\n "foo": "bar"\n}\n', stdout.getvalue())
def test_get_variable_default_value(self):
with redirect_stdout(io.StringIO()) as stdout:
variable_command.variables_get(
self.parser.parse_args(['variables', 'get', 'baz', '--default', 'bar'])
)
self.assertEqual("bar\n", stdout.getvalue())
def test_get_variable_missing_variable(self):
with self.assertRaises(SystemExit):
variable_command.variables_get(self.parser.parse_args(['variables', 'get', 'no-existing-VAR']))
def test_variables_set_different_types(self):
"""Test storage of various data types"""
# Set a dict
variable_command.variables_set(
self.parser.parse_args(['variables', 'set', 'dict', '{"foo": "oops"}'])
)
# Set a list
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'list', '["oops"]']))
# Set str
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'str', 'hello string']))
# Set int
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'int', '42']))
# Set float
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'float', '42.0']))
# Set true
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'true', 'true']))
# Set false
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'false', 'false']))
# Set none
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'null', 'null']))
# Export and then import
variable_command.variables_export(
self.parser.parse_args(['variables', 'export', 'variables_types.json'])
)
variable_command.variables_import(
self.parser.parse_args(['variables', 'import', 'variables_types.json'])
)
# Assert value
self.assertEqual({'foo': 'oops'}, Variable.get('dict', deserialize_json=True))
self.assertEqual(['oops'], Variable.get('list', deserialize_json=True))
self.assertEqual('hello string', Variable.get('str')) # cannot json.loads(str)
self.assertEqual(42, Variable.get('int', deserialize_json=True))
self.assertEqual(42.0, Variable.get('float', deserialize_json=True))
self.assertEqual(True, Variable.get('true', deserialize_json=True))
self.assertEqual(False, Variable.get('false', deserialize_json=True))
self.assertEqual(None, Variable.get('null', deserialize_json=True))
os.remove('variables_types.json')
def test_variables_list(self):
"""Test variable_list command"""
# Test command is received
variable_command.variables_list(self.parser.parse_args(['variables', 'list']))
def test_variables_delete(self):
"""Test variable_delete command"""
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'foo', 'bar']))
variable_command.variables_delete(self.parser.parse_args(['variables', 'delete', 'foo']))
self.assertRaises(KeyError, Variable.get, "foo")
def test_variables_import(self):
"""Test variables_import command"""
variable_command.variables_import(self.parser.parse_args(['variables', 'import', os.devnull]))
def test_variables_export(self):
"""Test variables_export command"""
variable_command.variables_export(self.parser.parse_args(['variables', 'export', os.devnull]))
def test_variables_isolation(self):
"""Test isolation of variables"""
tmp1 = tempfile.NamedTemporaryFile(delete=True)
tmp2 = tempfile.NamedTemporaryFile(delete=True)
# First export
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'foo', '{"foo":"bar"}']))
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'bar', 'original']))
variable_command.variables_export(self.parser.parse_args(['variables', 'export', tmp1.name]))
first_exp = open(tmp1.name)
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'bar', 'updated']))
variable_command.variables_set(self.parser.parse_args(['variables', 'set', 'foo', '{"foo":"oops"}']))
variable_command.variables_delete(self.parser.parse_args(['variables', 'delete', 'foo']))
variable_command.variables_import(self.parser.parse_args(['variables', 'import', tmp1.name]))
self.assertEqual('original', Variable.get('bar'))
self.assertEqual('{\n "foo": "bar"\n}', Variable.get('foo'))
# Second export
variable_command.variables_export(self.parser.parse_args(['variables', 'export', tmp2.name]))
second_exp = open(tmp2.name)
self.assertEqual(first_exp.read(), second_exp.read())
# Clean up files
second_exp.close()
first_exp.close()
|
the-stack_106_15649
|
import argparse
from time import time
from utils import *
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--skip", type=bool, default=True, help="use skip pointer or not")
args = parser.parse_args()
inverted_index = load_obj("inverted_index")
query = input("请输入查询内容的合取范式,如:(data | math | science) & (!information) & (computer | !system)\n").lower()
# query = "(data | math | science) & (!information) & (computer | !system)".lower()
start = time()
res_vec = parse_query(inverted_index, query, use_skip=args.skip)
stop = time()
res_show(res_vec)
print(f"--------------Length Of Result Set: {len(res_vec)}--------------------------------------")
print(f"--------------Time Used To Process Query: {stop-start}s-------------")
|
the-stack_106_15650
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import re
import struct
import cv2
import numpy as np
import megengine as mge
import megengine.core._imperative_rt as rt
import megengine.core.tensor.megbrain_graph as G
from megengine.utils import comp_graph_tools as cgtools
from megengine.core.ops import builtin
from megengine.core._imperative_rt.core2 import apply
from megengine.core.tensor.megbrain_graph import VarNode
from megengine import tensor
logger = mge.get_logger(__name__)
def auto_reformat_image(args, path, data, dst_shape):
"""reformat image to target shape
:param data: image data as numpy array
:param dst_shape: target shape
"""
dim3_format = False # required input format does not contain batch
hwc_format = False # required input format is NHWC
if not dst_shape: # input tensor shape is not predefined
if len(data.shape) == 2:
chl = 1
h = data.shape[0]
w = data.shape[1]
else:
assert len(data.shape) == 3, "Input image must be of dimension 2 or 3"
h, w, chl = data.shape
dst_shape = (1, chl, h, w)
if len(dst_shape) == 3:
dst_shape = (1,) + dst_shape
dim3_format = True
assert len(dst_shape) == 4, "bad dst_shape: {}".format(dst_shape)
chl = dst_shape[1]
if chl in [1, 3]:
n, c, h, w = dst_shape
dst_shape = (n, h, w, c)
else:
chl = dst_shape[3]
assert chl in [1, 3], "can not infer input format from shape: {}".format(
dst_shape
)
hwc_format = True
# dst_shape has now been normalized to NHWC format
if args.resize_input:
h, w = dst_shape[1:3]
data = cv2.resize(data, (w, h))
logger.info("input {} resized to {}".format(path, data.shape))
if chl == 1:
data = cv2.cvtColor(data, cv2.COLOR_BGR2GRAY)
data = data[:, :, np.newaxis]
assert data.ndim == 3
data = data[np.newaxis]
# data normalized to NHWC format
if not hwc_format:
data = np.transpose(data, (0, 3, 1, 2))
if dim3_format:
data = np.squeeze(data, 0)
return data
def read_input_data(args, dst_shape, dtype, path, repeat):
def check_shape_equal(dst_shape, data_shape):
if len(dst_shape):
assert len(data_shape) == len(
dst_shape
), "input/data shapes mismatch: {} vs {}".format(dst_shape, data_shape)
if data_shape[1:] != dst_shape[1:]:
logger.warning(
"dst_shape is {}; data_shape is {}".format(dst_shape, data_shape)
)
if path.startswith("#"):
assert not args.resize_input
assert not args.input_transform
spec = path
m = re.match(r"^#rand\(([-0-9.]*)\s*,\s*([-0-9.]*)\s*(,[^\)]+)?\)$", spec)
assert m, "bad spec {}".format(spec)
rng_min = float(m.group(1))
rng_max = float(m.group(2))
if m.group(3):
shape_str = m.group(3)
try:
shape = shape_str[1:].split(",")
if shape[-1].strip() == "...":
shape = shape[:-1]
shape.extend(list(dst_shape[len(shape) :]))
data_shape = tuple(map(int, shape))
except ValueError as e:
raise ValueError("bad spec {}: {}".format(spec, e.args))
else:
data_shape = dst_shape
check_shape_equal(dst_shape, data_shape)
return np.random.uniform(rng_min, rng_max, data_shape).astype(dtype)
# try to load image
data = cv2.imread(path, cv2.IMREAD_COLOR)
if data is None:
assert not args.resize_input
data = np.load(path)
assert isinstance(data, np.ndarray)
else:
# load image succeeds, so we expect input format is image format
data = auto_reformat_image(args, path, data, dst_shape)
data = np.repeat(data, repeat, axis=0)
if repeat > 1:
logger.info(
"repeat input for {} times, data shape is {}".format(repeat, data.shape)
)
check_shape_equal(dst_shape, data.shape)
if args.input_transform:
data = eval(args.input_transform, {"data": data, "np": np})
return data
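# Worked example of the "#rand" spec handled above (illustrative): with an input
# var whose dst_shape is (1, 3, 224, 224), "#rand(0, 255)" samples that full shape,
# "#rand(0, 255, 1, 3, ...)" keeps the leading (1, 3) and fills the trailing "..."
# from dst_shape, and "#rand(0, 255, 2, 3, 224, 224)" overrides the shape entirely.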
def gen_one_testcase(args, inputs, spec):
paths = spec.split(";")
if len(paths) != len(inputs):
if len(paths) == 1 and paths[0].startswith("#"):
paths = ["{}:{}".format(name, paths[0]) for name in inputs.keys()]
assert len(paths) == len(inputs), "required inputs: {}; data paths: {}".format(
inputs.keys(), paths
)
if len(paths) == 1 and ":" not in paths[0]:
paths[0] = next(iter(inputs.keys())) + ":" + paths[0]
ret = {}
for path in paths:
var, path = path.split(":")
if args.repeat:
repeat = args.repeat
else:
repeat = 1
ret[var] = read_input_data(
args, inputs[var].shape, inputs[var].dtype, path, repeat
)
return ret
def make_feeds(args):
cg_rt, _, outputs = G.load_graph(args.input)
inputs = cgtools.get_dep_vars(outputs, "Host2DeviceCopy")
inputs = {i.name: i for i in inputs}
if not args.no_assert:
replace_varmap = {}
inp_map = {}
# replace var use InputNode
for name, var in inputs.items():
inp = G.InputNode(
device="xpux", dtype=var.dtype, shape=var.shape, graph=cg_rt
)
replace_varmap[var] = inp.outputs[0]
inp_map[name] = inp
new = cgtools.replace_vars(outputs, replace_varmap)
if isinstance(new, rt.VarNode):
new = list(new)
output_nodes = [G.OutputNode(var) for var in new]
func = cg_rt.compile([node.outputs[0] for node in output_nodes])
def make_dev_tensor(value, dtype=None, device=None):
return tensor(value, dtype=dtype, device=device)._dev_tensor()
def calculate(*args, **kwargs):
output_val = []
# set inputs value
for name, var in inputs.items():
val = kwargs.pop(name, None)
assert val is not None, "miss input name{}".format(name)
dev_tensor = make_dev_tensor(val, dtype=var.dtype, device="xpux")
inp_map[name].set_value(dev_tensor)
func.execute()
for res in output_nodes:
output_val.append(res.get_value().numpy())
return output_val
def expect_name(var):
return "{}:expect".format(var.name)
testcases = []
np.set_printoptions(precision=2, threshold=4, suppress=True)
data_list = []
for item in args.data:
if item.startswith("@"):
with open(item[1:], "r") as f:
data_list.extend([line.rstrip() for line in f if line.rstrip() != ""])
else:
data_list.append(item)
for inp_spec in data_list:
cur_testcase = gen_one_testcase(args, inputs, inp_spec)
assert len(cur_testcase) == len(
inputs
), "required inputs: {}; given data: {}".format(
inputs.keys(), cur_testcase.keys()
)
if not args.no_assert:
outputs_get = calculate(**cur_testcase)
for var, val in zip(outputs, outputs_get):
cur_testcase[expect_name(var)] = val
logger.info(
"generate test groundtruth: var={} shape={} range=({}, {})"
" mean={} var={}".format(
var, val.shape, val.min(), val.max(), np.mean(val), np.var(val)
)
)
testcases.append(cur_testcase)
logger.info(
"add testcase: \n {}".format(
"\n ".join(
"{}: shape={} dtype={} range=({:.2f},{:.2f}) "
"mean={:.2f} sd={:.2f}".format(
k, v.shape, v.dtype, v.min(), v.max(), np.mean(v), np.std(v)
)
for k, v in sorted(cur_testcase.items())
)
)
)
if not args.no_assert:
def expect_shp(var):
ret = var.shape
if ret:
return ret
return testcases[0][expect_name(var)].shape
def assert_equal(expect, real, **kwargs):
op = builtin.AssertEqual(**kwargs)
(res,) = G.apply_normal_varnode(op, expect, real)
return G.VarNode(res)
verbose = not args.silent
outputs_new = []
for i in outputs:
device = rt.CompNode("xpux")
dtype = i.dtype
name = expect_name(i)
shape = expect_shp(i)
# make expect output as one input of model.
expect_get = rt.make_h2d(cg_rt, device, dtype, shape, name)
# insert assert opr to check expect and real.
outputs_new.append(
assert_equal(
G.VarNode(expect_get),
G.VarNode(i),
verbose=verbose,
maxerr=args.maxerr,
)
)
inputs[expect_name(i)] = expect_get
outputs = outputs_new
return {"outputs": outputs, "testcases": testcases}
def optimize_for_inference(args, outputs):
args_map = {
"enable_io16xc32": "f16_io_f32_comp",
"enable_ioc16": "f16_io_comp",
"enable_hwcd4": "use_nhwcd4",
"enable_nchw4": "use_nchw4",
"enable_nchw88": "use_nchw88",
"enable_nchw44": "use_nchw44",
"enable_nchw44_dot": "use_nchw44_dot",
"enable_nchw32": "use_nchw32",
"enable_chwn4": "use_chwn4",
"enable_fuse_conv_bias_nonlinearity": "fuse_conv_bias_nonlinearity",
"enable_fuse_conv_bias_with_z": "fuse_conv_bias_with_z",
}
kwargs = {}
for k, v in args_map.items():
if getattr(args, k):
assert (
args.optimize_for_inference
), "optimize_for_inference should be set when {} is given".format(k)
kwargs[v] = True
if args.optimize_for_inference:
outputs = [i._node for i in G.optimize_for_inference(outputs, **kwargs)]
return outputs
def main():
parser = argparse.ArgumentParser(
description="Pack computing graph, input values and expected output "
"values into one file for checking correctness. README.md gives more "
"details on the usage",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("input", help="MegEngine dumped model file")
parser.add_argument("-o", "--output", help="output file", required=True)
parser.add_argument(
"-d",
"--data",
default=[],
action="append",
required=True,
help="Given input test data when input file is a network, "
"and current network output would be used as groundtruth. "
"The format is var0:file0;var1:file1... to specify data files for "
"input vars. It can also be #rand(min,max,shape...) for generating "
"random input data, for example, #rand(0,255), "
"#rand(0,255,1,3,224,224) or #rand(0, 255, 1, ...) where `...` means "
"the remaining part of the original shape. "
"If the shape is not specified, the shape of "
"corresponding input tensors in the network will be used. "
"If there is only one input var, its name can be omitted. "
"Each data file can either be an image which can be loaded by opencv, "
"or a pickled numpy.ndarray. "
"This option can be given multiple times to add multiple testcases. "
" *NOTE* "
"If you start the data with the letter @, the rest should be a "
"filename, and each line in the file should be a single datum in "
"the format described above. ",
)
parser.add_argument(
"--repeat",
type=int,
default=1,
help="Specify how many times the input image is repeated. "
"Useful when running benchmark for batch size other than one. "
"Have no effect on randomly generated input data.",
)
parser.add_argument(
"--silent",
action="store_true",
help="set verbose to False in asserti_equal opr",
)
parser.add_argument(
"--optimize-for-inference",
action="store_false",
help="enbale optimization for inference",
)
parser.add_argument(
"--no-assert",
action="store_true",
help="do not insert assert_equal opr to check result; "
"this option is useful for benchmarking",
)
parser.add_argument(
"--maxerr",
type=float,
default=1e-4,
help="max error for assert_equal check during runtime",
)
parser.add_argument(
"--resize-input",
action="store_true",
help="resize input image to fit input var shape",
)
parser.add_argument(
"--input-transform",
help="a python expression to transform the input data. "
"Example: data / np.std(data)",
)
parser.add_argument(
"--discard-var-name",
action="store_true",
help="discard variable and param names in the " "generated output",
)
parser.add_argument(
"--output-strip-info", action="store_true", help="output code strip information"
)
parser.add_argument(
"--enable-io16xc32",
action="store_true",
help="transform the mode to float16 io float32 compute",
)
parser.add_argument(
"--enable-ioc16",
action="store_true",
help="transform the dtype of the model to float16 io " "and compute",
)
parser.add_argument(
"--enable-fuse-conv-bias-nonlinearity",
action="store_true",
help="fuse convolution bias and nonlinearity opr to a "
"conv_bias opr and compute",
)
parser.add_argument(
"--enable-hwcd4",
action="store_true",
help="transform the model format from NCHW to NHWCD4 "
"for inference; you may need to disable CUDA and set "
"MGB_USE_MEGDNN_DBG=2",
)
parser.add_argument(
"--enable-nchw4",
action="store_true",
help="transform the model format from NCHW to NCHW4 " "for inference",
)
parser.add_argument(
"--enable-nchw88",
action="store_true",
help="transform the model format from NCHW to NCHW88 " "for inference",
)
parser.add_argument(
"--enable-nchw44",
action="store_true",
help="transform the model format from NCHW to NCHW44 " "for inference",
)
parser.add_argument(
"--enable-nchw44-dot",
action="store_true",
help="transform the model format from NCHW to NCHW44_DOT "
"for optimizing armv8.2 dot in inference",
)
parser.add_argument(
"--enable-nchw32",
action="store_true",
help="transform the model format from NCHW4 to NCHW32 "
"for inference on nvidia TensoCore",
)
parser.add_argument(
"--enable-chwn4",
action="store_true",
help="transform the model format to CHWN4 "
"for inference, mainly used for nvidia tensorcore",
)
parser.add_argument(
"--enable-fuse-conv-bias-with-z",
action="store_true",
help="fuse conv_bias with z input for inference on "
"nvidia GPU (this optimization pass will result in mismatch "
"of the precision of output of training and inference)",
)
args = parser.parse_args()
feeds = make_feeds(args)
assert isinstance(feeds, dict) and feeds["testcases"], "testcases can not be empty"
output_mgbvars = feeds["outputs"]
output_mgbvars = optimize_for_inference(args, output_mgbvars)
inputs = cgtools.get_dep_vars(output_mgbvars, "Host2DeviceCopy")
inputs = sorted((i.name, i.dtype) for i in inputs)
if args.discard_var_name:
sereg_kwargs = dict(keep_var_name=0, keep_param_name=False)
else:
sereg_kwargs = dict(keep_var_name=2, keep_param_name=True)
strip_info_file = args.output + ".json" if args.output_strip_info else None
with open(args.output, "wb") as fout:
fout.write(b"mgbtest0")
fout.write(struct.pack("I", len(feeds["testcases"])))
if isinstance(output_mgbvars, dict):
wrap_output_vars = dict([(i, VarNode(j)) for i, j in output_mgbvars])
else:
wrap_output_vars = [VarNode(i) for i in output_mgbvars]
dump_content, stat = G.dump_graph(
wrap_output_vars,
append_json=True,
strip_info_file=strip_info_file,
**sereg_kwargs
)
fout.write(dump_content)
logger.info(
"graph dump sizes: tot_size={:.3f}KiB overhead={:.3f}KiB".format(
stat.tot_bytes / 1024, (stat.tot_bytes - stat.tensor_value_bytes) / 1024
)
)
def make_dev_tensor(value, dtype=None, device=None):
return tensor(value, dtype=dtype, device=device)._dev_tensor()
for testcase in feeds["testcases"]:
assert isinstance(testcase, dict)
cg = G.Graph()
output_mgbvars = []
for name, dtype in inputs:
output_mgbvars.append(
cg.make_const(
make_dev_tensor(testcase.pop(name), dtype=dtype, device="cpux")
)
)
assert not testcase, "extra inputs provided in testcase: {}".format(
testcase.keys()
)
with open(args.output, "ab") as fout:
dump_content, _ = G.dump_graph(
output_mgbvars, strip_info_file=strip_info_file, append_json=True
)
fout.write(dump_content)
if __name__ == "__main__":
main()
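# Hedged CLI sketch (file names are placeholders; run with --help for the full list):
#   python <this_script>.py model.mge -o model_with_testcase.mge \
#       -d "#rand(0, 255, 1, 3, 224, 224)"
# packs one random-input testcase and, unless --no-assert is given, records the
# current network outputs as the expected results checked at runtime.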
|
the-stack_106_15651
|
# -*- coding: utf-8 -*-
"""
A window with a unicode textfield where the user can edit.
Useful for editing the contents of an article.
"""
#
# (C) Rob W.W. Hooft, 2003
# (C) Daniel Herding, 2004
# Wikiwichtel
# (C) Pywikibot team, 2008-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import pywikibot
from pywikibot import __url__
from pywikibot.tools import PY2, PYTHON_VERSION, UnicodeType
if not PY2:
import tkinter as Tkinter
from tkinter.scrolledtext import ScrolledText
from tkinter import simpledialog as tkSimpleDialog
else:
import Tkinter
import tkSimpleDialog
from ScrolledText import ScrolledText
# T164163: Fix idlelib import in Python 3.6
if PYTHON_VERSION >= (3, 6):
from idlelib import (
search as SearchDialog,
replace as ReplaceDialog,
configdialog as configDialog
)
from idlelib.config import idleConf
from idlelib.multicall import MultiCallCreator
else:
from idlelib import SearchDialog, ReplaceDialog, configDialog
from idlelib.configHandler import idleConf
from idlelib.MultiCall import MultiCallCreator
class TextEditor(ScrolledText):
"""A text widget with some editing enhancements.
A lot of code here is copied or adapted from the idlelib/EditorWindow.py
file in the standard Python distribution.
"""
def __init__(self, master=None, **kwargs):
"""
Initializer.
Get default settings from user's IDLE configuration.
"""
currentTheme = idleConf.CurrentTheme()
textcf = {
'padx': 5,
'wrap': 'word',
'undo': 'True',
'foreground': idleConf.GetHighlight(
currentTheme, 'normal', fgBg='fg'),
'background': idleConf.GetHighlight(
currentTheme, 'normal', fgBg='bg'),
'highlightcolor': idleConf.GetHighlight(
currentTheme, 'hilite', fgBg='fg'),
'highlightbackground': idleConf.GetHighlight(
currentTheme, 'hilite', fgBg='bg'),
'insertbackground': idleConf.GetHighlight(
currentTheme, 'cursor', fgBg='fg'),
'width': idleConf.GetOption('main', 'EditorWindow', 'width'),
'height': idleConf.GetOption('main', 'EditorWindow', 'height'),
}
fontWeight = 'normal'
if idleConf.GetOption('main', 'EditorWindow', 'font-bold',
type='bool'):
fontWeight = 'bold'
textcf['font'] = (idleConf.GetOption('main', 'EditorWindow', 'font'),
idleConf.GetOption('main', 'EditorWindow',
'font-size'),
fontWeight)
# override defaults with any user-specified settings
textcf.update(kwargs)
ScrolledText.__init__(self, master, **textcf)
def add_bindings(self):
"""Assign key and events bindings to methods."""
# due to IDLE dependencies, this can't be called from __init__
# add key and event bindings
self.bind('<<cut>>', self.cut)
self.bind('<<copy>>', self.copy)
self.bind('<<paste>>', self.paste)
self.bind('<<select-all>>', self.select_all)
self.bind('<<remove-selection>>', self.remove_selection)
self.bind('<<find>>', self.find_event)
self.bind('<<find-again>>', self.find_again_event)
self.bind('<<find-selection>>', self.find_selection_event)
self.bind('<<replace>>', self.replace_event)
self.bind('<<goto-line>>', self.goto_line_event)
self.bind('<<del-word-left>>', self.del_word_left)
self.bind('<<del-word-right>>', self.del_word_right)
keydefs = {'<<copy>>': ['<Control-Key-c>', '<Control-Key-C>'],
'<<cut>>': ['<Control-Key-x>', '<Control-Key-X>'],
'<<del-word-left>>': ['<Control-Key-BackSpace>'],
'<<del-word-right>>': ['<Control-Key-Delete>'],
'<<end-of-file>>': ['<Control-Key-d>', '<Control-Key-D>'],
'<<find-again>>': ['<Control-Key-g>', '<Key-F3>'],
'<<find-selection>>': ['<Control-Key-F3>'],
'<<find>>': ['<Control-Key-f>', '<Control-Key-F>'],
'<<goto-line>>': ['<Alt-Key-g>', '<Meta-Key-g>'],
'<<paste>>': ['<Control-Key-v>', '<Control-Key-V>'],
'<<redo>>': ['<Control-Shift-Key-Z>'],
'<<remove-selection>>': ['<Key-Escape>'],
'<<replace>>': ['<Control-Key-h>', '<Control-Key-H>'],
'<<select-all>>': ['<Control-Key-a>'],
'<<undo>>': ['<Control-Key-z>', '<Control-Key-Z>'],
}
for event, keylist in keydefs.items():
if keylist:
self.event_add(event, *keylist)
def cut(self, event):
"""Perform cut operation."""
if self.tag_ranges('sel'):
self.event_generate('<<Cut>>')
return 'break'
def copy(self, event):
"""Perform copy operation."""
if self.tag_ranges('sel'):
self.event_generate('<<Copy>>')
return 'break'
def paste(self, event):
"""Perform paste operation."""
self.event_generate('<<Paste>>')
return 'break'
def select_all(self, event=None):
"""Perform select all operation."""
self.tag_add('sel', '1.0', 'end-1c')
self.mark_set('insert', '1.0')
self.see('insert')
return 'break'
def remove_selection(self, event=None):
"""Perform remove operation."""
self.tag_remove('sel', '1.0', 'end')
self.see('insert')
def del_word_left(self, event):
"""Perform delete word (left) operation."""
self.event_generate('<Meta-Delete>')
return 'break'
def del_word_right(self, event=None):
"""Perform delete word (right) operation."""
self.event_generate('<Meta-d>')
return 'break'
def find_event(self, event=None):
"""Perform find operation."""
if not self.tag_ranges('sel'):
found = self.tag_ranges('found')
if found:
self.tag_add('sel', found[0], found[1])
else:
self.tag_add('sel', '1.0', '1.0+1c')
SearchDialog.find(self)
return 'break'
def find_again_event(self, event=None):
"""Perform find again operation."""
SearchDialog.find_again(self)
return 'break'
def find_selection_event(self, event=None):
"""Perform find selection operation."""
SearchDialog.find_selection(self)
return 'break'
def replace_event(self, event=None):
"""Perform replace operation."""
ReplaceDialog.replace(self)
return 'break'
def find_all(self, s):
"""
Highlight all occurrences of string s, and select the first one.
If the string has already been highlighted, jump to the next occurrence
after the current selection. (You cannot go backwards using the
button, but you can manually place the cursor anywhere in the
document to start searching from that point.)
"""
if hasattr(self, '_highlight') and self._highlight == s:
try:
if self.get(Tkinter.SEL_FIRST, Tkinter.SEL_LAST) == s:
return self.find_selection_event(None)
else:
# user must have changed the selection
found = self.tag_nextrange('found', Tkinter.SEL_LAST)
except Tkinter.TclError:
# user must have unset the selection
found = self.tag_nextrange('found', Tkinter.INSERT)
if not found:
# at last occurrence, scroll back to the top
found = self.tag_nextrange('found', 1.0)
if found:
self.do_highlight(found[0], found[1])
else:
# find all occurrences of string s;
# adapted from O'Reilly's Python in a Nutshell
# remove previous uses of tag 'found', if any
self.tag_remove('found', '1.0', Tkinter.END)
if s:
self._highlight = s
# start from the beginning (and when we come to the end, stop)
idx = '1.0'
while True:
# find next occurrence, exit loop if no more
idx = self.search(s, idx, nocase=1, stopindex=Tkinter.END)
if not idx:
break
# index right after the end of the occurrence
lastidx = '%s+%dc' % (idx, len(s))
# tag the whole occurrence (start included, stop excluded)
self.tag_add('found', idx, lastidx)
# prepare to search for next occurrence
idx = lastidx
# use a red foreground for all the tagged occurrences
self.tag_config('found', foreground='red')
found = self.tag_nextrange('found', 1.0)
if found:
self.do_highlight(found[0], found[1])
def do_highlight(self, start, end):
"""Select and show the text from index start to index end."""
self.see(start)
self.tag_remove(Tkinter.SEL, '1.0', Tkinter.END)
self.tag_add(Tkinter.SEL, start, end)
self.focus_set()
def goto_line_event(self, event):
"""Perform goto line operation."""
lineno = tkSimpleDialog.askinteger('Goto', 'Go to line number:',
parent=self)
if lineno is None:
return 'break'
if lineno <= 0:
self.bell()
return 'break'
self.mark_set('insert', '%d.0' % lineno)
self.see('insert')
class EditBoxWindow(Tkinter.Frame):
"""Edit box window."""
def __init__(self, parent=None, **kwargs):
"""Initializer."""
if parent is None:
# create a new window
parent = Tkinter.Tk()
self.parent = parent
Tkinter.Frame.__init__(self, parent)
self.editbox = MultiCallCreator(TextEditor)(self, **kwargs)
self.editbox.pack(side=Tkinter.TOP)
self.editbox.add_bindings()
self.bind('<<open-config-dialog>>', self.config_dialog)
bottom = Tkinter.Frame(parent)
# lower left subframe with a textfield and a Search button
bottom_left_frame = Tkinter.Frame(bottom)
self.textfield = Tkinter.Entry(bottom_left_frame)
self.textfield.pack(side=Tkinter.LEFT, fill=Tkinter.X, expand=1)
buttonSearch = Tkinter.Button(bottom_left_frame, text='Find next',
command=self.find)
buttonSearch.pack(side=Tkinter.RIGHT)
bottom_left_frame.pack(side=Tkinter.LEFT, expand=1)
# lower right subframe which will contain OK and Cancel buttons
bottom_right_frame = Tkinter.Frame(bottom)
buttonOK = Tkinter.Button(bottom_right_frame, text='OK',
command=self.pressedOK)
buttonCancel = Tkinter.Button(bottom_right_frame, text='Cancel',
command=parent.destroy)
buttonOK.pack(side=Tkinter.LEFT, fill=Tkinter.X)
buttonCancel.pack(side=Tkinter.RIGHT, fill=Tkinter.X)
bottom_right_frame.pack(side=Tkinter.RIGHT, expand=1)
bottom.pack(side=Tkinter.TOP)
# create a toplevel menu
menubar = Tkinter.Menu(self.parent)
findmenu = Tkinter.Menu(menubar)
findmenu.add_command(label='Find',
command=self.editbox.find_event,
accelerator='Ctrl+F',
underline=0)
findmenu.add_command(label='Find again',
command=self.editbox.find_again_event,
accelerator='Ctrl+G',
underline=6)
findmenu.add_command(label='Find all',
command=self.find_all,
underline=5)
findmenu.add_command(label='Find selection',
command=self.editbox.find_selection_event,
accelerator='Ctrl+F3',
underline=5)
findmenu.add_command(label='Replace',
command=self.editbox.replace_event,
accelerator='Ctrl+H',
underline=0)
menubar.add_cascade(label='Find', menu=findmenu, underline=0)
editmenu = Tkinter.Menu(menubar)
editmenu.add_command(label='Cut',
command=self.editbox.cut,
accelerator='Ctrl+X',
underline=2)
editmenu.add_command(label='Copy',
command=self.editbox.copy,
accelerator='Ctrl+C',
underline=0)
editmenu.add_command(label='Paste',
command=self.editbox.paste,
accelerator='Ctrl+V',
underline=0)
editmenu.add_separator()
editmenu.add_command(label='Select all',
command=self.editbox.select_all,
accelerator='Ctrl+A',
underline=7)
editmenu.add_command(label='Clear selection',
command=self.editbox.remove_selection,
accelerator='Esc')
menubar.add_cascade(label='Edit', menu=editmenu, underline=0)
optmenu = Tkinter.Menu(menubar)
optmenu.add_command(label='Settings...',
command=self.config_dialog,
underline=0)
menubar.add_cascade(label='Options', menu=optmenu, underline=0)
# display the menu
self.parent.config(menu=menubar)
self.pack()
def edit(self, text, jumpIndex=None, highlight=None):
"""
Provide user with editor to modify text.
@param text: the text to be edited
@type text: str
@param jumpIndex: position at which to put the caret
@type jumpIndex: int
@param highlight: each occurrence of this substring will be highlighted
@type highlight: str
@return: the modified text, or None if the user didn't save the text
file in his text editor
@rtype: str or None
"""
self.text = None
# put given text into our textarea
self.editbox.insert(Tkinter.END, text)
        # enable word wrap
self.editbox.tag_add('all', '1.0', Tkinter.END)
self.editbox.tag_config('all', wrap=Tkinter.WORD)
# start search if required
if highlight:
self.find_all(highlight)
if jumpIndex:
# lines are indexed starting at 1
line = text[:jumpIndex].count('\n') + 1
column = jumpIndex - (text[:jumpIndex].rfind('\n') + 1)
# don't know how to place the caret, but scrolling to the right
# line should already be helpful.
self.editbox.see('%d.%d' % (line, column))
# wait for user to push a button which will destroy (close) the window
self.parent.mainloop()
return self.text
def find_all(self, target):
"""Perform find all operation."""
self.textfield.insert(Tkinter.END, target)
self.editbox.find_all(target)
def find(self):
"""Perform find operation."""
# get text to search for
s = self.textfield.get()
if s:
self.editbox.find_all(s)
def config_dialog(self, event=None):
"""Show config dialog."""
configDialog.ConfigDialog(self, 'Settings')
def pressedOK(self):
"""
Perform OK operation.
Called when user pushes the OK button.
Saves the buffer into a variable, and closes the window.
"""
self.text = self.editbox.get('1.0', Tkinter.END)
# if the editbox contains ASCII characters only, get() will
# return string, otherwise unicode (very annoying). We only want
# it to return unicode, so we work around this.
if PY2 and isinstance(self.text, str):
self.text = UnicodeType(self.text)
self.parent.destroy()
def debug(self, event=None):
"""Call quit() and return 'break'."""
self.quit()
return 'break'
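# Hedged usage sketch (added for illustration; not part of the original module):
def _example_edit():
    window = EditBoxWindow()
    # returns the edited text, or None if the user pressed Cancel
    return window.edit('Some [[wikitext]] to adjust', highlight='wikitext')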
# the following class isn't used anywhere in the framework: ####
class ListBoxWindow(object):
"""List box window."""
# called when user pushes the OK button.
# closes the window.
def pressedOK(self):
"""
Perform OK operation.
Closes listbox.
"""
self.parent.destroy()
def __init__(self, parent=None):
"""Initializer."""
if parent is None:
# create a new window
parent = Tkinter.Tk()
self.parent = parent
# selectable: only one item
self.listbox = Tkinter.Listbox(parent, selectmode=Tkinter.SINGLE)
# put list into main frame, using all available space
self.listbox.pack(anchor=Tkinter.CENTER, fill=Tkinter.BOTH)
# lower subframe which will contain one button
self.bottom_frame = Tkinter.Frame(parent)
self.bottom_frame.pack(side=Tkinter.BOTTOM)
buttonOK = Tkinter.Button(self.bottom_frame, text='OK',
command=self.pressedOK)
buttonOK.pack(side=Tkinter.LEFT, fill=Tkinter.X)
# idea: set title to cur_disambiguation
def list(self, list):
"""Put list of alternatives into listbox."""
self.list = list
# find required area
laenge = len(list)
maxbreite = 0
for i in range(laenge):
# cycle through all listitems to find maxlength
if len(list[i]) + len(str(i)) > maxbreite:
maxbreite = len(list[i]) + len(str(i))
# show list as formerly in DOS-window
self.listbox.insert(Tkinter.END, str(i) + ' - ' + list[i])
# set optimized height & width
self.listbox.config(height=laenge, width=maxbreite + 2)
# wait for user to push a button which will destroy (close) the window
return self.list
class Tkdialog(object):
"""The dialog window for image info."""
def __init__(self, photo_description, photo, filename):
"""Initializer."""
self.root = Tkinter.Tk()
# "%dx%d%+d%+d" % (width, height, xoffset, yoffset)
self.root.geometry('%ix%i+10-10' % (pywikibot.config.tkhorsize,
pywikibot.config.tkvertsize))
self.root.title(filename)
self.photo_description = photo_description
self.filename = filename
self.photo = photo
self.skip = False
self.exit = False
# --Init of the widgets
# The image
self.image = self.get_image(self.photo, 800, 600)
self.image_panel = Tkinter.Label(self.root, image=self.image)
self.image_panel.image = self.image
# The filename
self.filename_label = Tkinter.Label(self.root,
text='Suggested filename')
self.filename_field = Tkinter.Entry(self.root, width=100)
self.filename_field.insert(Tkinter.END, filename)
# The description
self.description_label = Tkinter.Label(self.root,
text='Suggested description')
self.description_scrollbar = Tkinter.Scrollbar(self.root,
orient=Tkinter.VERTICAL)
self.description_field = Tkinter.Text(self.root)
self.description_field.insert(Tkinter.END, photo_description)
self.description_field.config(
state=Tkinter.NORMAL, height=12, width=100, padx=0, pady=0,
wrap=Tkinter.WORD, yscrollcommand=self.description_scrollbar.set)
self.description_scrollbar.config(command=self.description_field.yview)
# The buttons
self.ok_button = Tkinter.Button(self.root, text='OK',
command=self.ok_file)
self.skip_button = Tkinter.Button(self.root, text='Skip',
command=self.skip_file)
# --Start grid
# The image
self.image_panel.grid(row=0, column=0, rowspan=11, columnspan=4)
# The buttons
self.ok_button.grid(row=11, column=1, rowspan=2)
self.skip_button.grid(row=11, column=2, rowspan=2)
# The filename
self.filename_label.grid(row=13, column=0)
self.filename_field.grid(row=13, column=1, columnspan=3)
# The description
self.description_label.grid(row=14, column=0)
self.description_field.grid(row=14, column=1, columnspan=3)
self.description_scrollbar.grid(row=14, column=5)
def get_image(self, photo, width, height):
"""Take the BytesIO object and build an imageTK thumbnail."""
try:
from PIL import Image, ImageTk
except ImportError:
            pywikibot.warning('This script requires ImageTk from the '
'Python Imaging Library (PIL).\n'
'See: {0}/flickrripper.py'.format(__url__))
raise
image = Image.open(photo)
image.thumbnail((width, height))
imageTk = ImageTk.PhotoImage(image)
return imageTk
def ok_file(self):
"""The user pressed the OK button."""
self.filename = self.filename_field.get()
self.photo_description = self.description_field.get(0.0, Tkinter.END)
self.root.destroy()
def skip_file(self):
"""The user pressed the Skip button."""
self.skip = True
self.root.destroy()
def show_dialog(self):
"""Activate the dialog.
@return: new description, name, and if the image is skipped
@rtype: tuple of (unicode, unicode, bool)
"""
self.root.mainloop()
return self.photo_description, self.filename, self.skip
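# Hedged usage sketch (added for illustration): `photo` is assumed to be a BytesIO
# holding image data, as expected by get_image(); description and name are placeholders.
def _example_dialog(photo):
    dialog = Tkdialog('A sample description', photo, 'Sample.jpg')
    return dialog.show_dialog()  # (description, filename, skip) once the user closes it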
|
the-stack_106_15654
|
import argparse
def get_args():
parser = argparse.ArgumentParser(description='Experimental code for the QMC paper')
parser.add_argument('--model',
metavar='ARCH',
default='resnet20',
help='model to use (lenet, resnetxx)')
parser.add_argument('--pretrained',
default=True,
action='store_true',
help='whether to use pretrained model (currently only for ImageNet)')
parser.add_argument('--dataset',
default='cifar10',
type=str,
help='dataset used in the experiment (default: cifar10)')
parser.add_argument('--data_path',
type=str,
help='the base directory for dataset')
parser.add_argument('--num_workers',
default=0,
type=int,
metavar='N',
help='number of data loading workers (default: 0)')
parser.add_argument('--epochs',
default=200,
type=int,
metavar='N',
help='number of total epochs to run')
parser.add_argument('--start_epoch',
default=0,
type=int,
metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--batch_size',
default=128,
type=int,
metavar='N',
help='mini-batch size (default: 128)')
parser.add_argument('--test_batch_size',
default=1024,
type=int,
metavar='N',
help='mini-batch size used for testing (default: 1024)')
parser.add_argument('--lr',
default=0.1,
type=float,
metavar='LR',
help='initial learning rate')
parser.add_argument('--momentum',
default=0.9,
type=float,
metavar='M',
help='momentum')
parser.add_argument('--weight_decay',
default=1e-4,
type=float,
metavar='W',
help='weight decay (default: 1e-4)')
parser.add_argument('--print_freq',
default=50,
type=int,
metavar='N',
help='print frequency (default: 50)')
parser.add_argument('--start_greedy',
default=1,
type=int,
metavar='N',
help='the epoch where the greedy strategy will be first used (100 in CIFAR10 case)')
parser.add_argument('--seed',
default=0,
type=int,
metavar='N',
help='random seed used in the experiment')
parser.add_argument('--use_tensorboard',
default=False,
action='store_true',
                        help='whether to log results to tensorboard')
parser.add_argument('--tensorboard_path',
type=str,
help='the base directory for tensorboard logs')
parser.add_argument('--zo_batch_size',
default=1,
type=int,
metavar='N',
                        help='zero-th order mini-batch size (default: 1)')
# greedy method related arguments
parser.add_argument('--shuffle_type',
default='random_reshuffling',
choices=['random_reshuffling', 'shuffle_once', 'stale_grad_greedy_sort', 'fresh_grad_greedy_sort'],
type=str,
help='shuffle type used for the optimization (choose from random_reshuffling, shuffle_once, stale_grad_greedy_sort, fresh_grad_greedy_sort)')
parser.add_argument('--task_name',
default='test',
type=str,
help='task name used for tensorboard')
parser.add_argument('--log_metric',
default=False,
action='store_true',
help='whether to log the LHS-QMC metric during training (default: False)')
parser.add_argument('--use_random_proj',
default=False,
action='store_true',
                        help='whether to use projection when doing the greedy sorting (default: False)')
parser.add_argument('--use_random_proj_full',
default=False,
action='store_true',
                        help='whether to use projection after storing all the full-dimension gradients (default: False)')
parser.add_argument('--use_qr',
default=False,
action='store_true',
help='whether to use qr_decomposition in the sorting part (default: True)')
parser.add_argument('--proj_ratio',
default=0.1,
type=float,
help='decide project how much ratio of the orginal entire model (default: 0.1)')
parser.add_argument('--proj_target',
default=1024,
type=int,
help='the target dimension for random projection')
# data augmentation related arguments
parser.add_argument('--use_uniform_da',
default=False,
action='store_true',
help='whether to use the baseline data augmentation in the training data')
parser.add_argument('--use_qmc_da',
default=False,
action='store_true',
help='whether to use qmc based data augmentation')
parser.add_argument('--scramble',
default=True,
action='store_true',
help='whether to use scramble in the sobol sequence (default: True)')
parser.add_argument('--transforms_json',
type=str,
help='the file for customized data augmentation')
args = parser.parse_args()
return args
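# Minimal usage sketch (hypothetical driver, not part of the original experiment code):
# parse the configuration defined above and seed the run before training starts.
if __name__ == '__main__':
    import random
    import numpy as np
    import torch
    args = get_args()
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    print('model={} dataset={} epochs={} shuffle_type={}'.format(
        args.model, args.dataset, args.epochs, args.shuffle_type))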
|
the-stack_106_15655
|
from config import *
from meshutil import *
from colorutil import *
import gl.glm as glm
from gl.glrender import *
import time
import cv2 as cv
import numpy as np
def gen_segmentation(model, seg_idx, classes):
sample_mesh_path = conf.mesh_path(model, 0)
vertices, faces = load_mesh(sample_mesh_path)
centers, vertex_as, face_as = furthest_point_sample(vertices, faces, classes, 10)
datas = [centers, vertex_as, face_as]
types = ['center', 'vertex_as', 'face_as']
for i in range(3):
seg_path = conf.segmentation_path(model, seg_idx, types[i])
np.save(seg_path, datas[i])
def gen_label(model_range, seg_range, swi_range, dis_range, rot_range, depth_and_vertex=True):
b = conf.zfar * conf.znear / (conf.znear - conf.zfar)
a = -b / conf.znear
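    # The coefficients a and b invert the projection's non-linear depth-buffer
    # mapping: a stored depth value z in [0, 1] corresponds to an eye-space
    # distance of b / (z - a), which the depth image computation below relies on.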
# preparation
if depth_and_vertex:
seg_range = [-1] + seg_range
seg_color_map = distinct_colors(conf.num_classes)
renderer = GLRenderer(b'gen_label', (conf.width, conf.height), (0, 0), toTexture=True)
proj = glm.perspective(glm.radians(70), 1.0, conf.znear, conf.zfar)
for model in model_range:
vertex_color = None
for mesh_idx in range(conf.num_meshes[model]):
print('Generate label for model {} Mesh {}...'.format(model, mesh_idx))
mesh_path = conf.mesh_path(model, mesh_idx)
vertices, faces = load_mesh(mesh_path)
regularize_mesh(vertices, model)
faces = faces.reshape([faces.shape[0] * 3])
vertex_buffer = vertices[faces]
if vertex_color is None:
vertex_color = distinct_colors(vertices.shape[0])
vertex_color_buffer = (vertex_color[faces] / 255.0).astype(np.float32)
for seg_idx in seg_range:
# prepare segmentation color
if seg_idx != -1:
seg_path = conf.segmentation_path(model, seg_idx)
segmentation = np.load(seg_path)
seg_color_buffer = np.zeros([faces.shape[0], 3], np.float32)
face_colors = seg_color_map[segmentation] / 255.0
seg_color_buffer[2::3,:] = seg_color_buffer[1::3,:] = seg_color_buffer[0::3,:] = face_colors
for swi in swi_range:
for dis in dis_range:
for rot in rot_range:
mod = glm.identity()
mod = glm.rotate(mod, glm.radians(swi - conf.max_swi / 2), glm.vec3(0, 1, 0))
mod = glm.translate(mod, glm.vec3(0, 0, -dis / 100.0))
mod = glm.rotate(mod, glm.radians(rot), glm.vec3(0, 1, 0))
mvp = proj.dot(mod)
view_name = conf.view_name(swi, dis, rot)
if seg_idx == -1:
rgb, z = renderer.draw(vertex_buffer, vertex_color_buffer, mvp.T)
# save depth view
depth = ((conf.zfar - b / (z - a)) / (conf.zfar - conf.znear) * 255).astype(np.uint8)
depth_view_path = conf.depth_view_path(model, mesh_idx, view_name)
cv.imwrite(depth_view_path, depth)
# save vertex view
vertex_view_path = conf.vertex_view_path(model, mesh_idx, view_name)
cv.imwrite(vertex_view_path, rgb)
else:
rgb, z = renderer.draw(vertex_buffer, seg_color_buffer, mvp.T)
# save segmentation view
seg_view_path = conf.segmentation_view_path(model, mesh_idx, seg_idx, view_name)
cv.imwrite(seg_view_path, rgb)
if __name__ == '__main__':
model_range = ['SCAPE', 'MITcrane', 'MITjumping', 'MITmarch1', 'MITsquat1', 'MITsamba', 'MITswing']
#gen_segmentation('SCAPE', 101, 64)
# model_range = ['SCAPE']
# for model in model_range:
# for i in range(5, 100):
# gen_segmentation(model, i, conf.num_classes)
seg_range = [i for i in range(5, 100)]
swi_range = [25, 35, 45]
dis_range = [250, 200]
rot_range = [i for i in range(0, 360, 15)]
gen_label(model_range, seg_range, swi_range, dis_range, rot_range, True)
|
the-stack_106_15657
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):
if os.name == 'nt':
import ctypes
kernel32 = ctypes.windll.kernel32
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# Enable ascii color control to stdout
stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
stdout_mode = ctypes.c_int32()
kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode))
kernel32.SetConsoleMode(stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
# Enable ascii color control to stderr
stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE)
stderr_mode = ctypes.c_int32()
kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode))
kernel32.SetConsoleMode(stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
GREEN = ('\033[0m', '\033[0;32m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS = [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_fee_estimation.py',
'wallet_hd.py',
'wallet_backup.py',
# vv Tests less than 5m vv
'mining_getblocktemplate_longpoll.py',
'feature_maxuploadtarget.py',
'feature_block.py',
'rpc_fundrawtransaction.py',
'p2p_compactblocks.py',
'feature_segwit.py',
# vv Tests less than 2m vv
'wallet_basic.py',
'wallet_labels.py',
'p2p_segwit.py',
'p2p_timeouts.py',
'wallet_dump.py',
'wallet_listtransactions.py',
# vv Tests less than 60s vv
'p2p_sendheaders.py',
'wallet_zapwallettxes.py',
'wallet_importmulti.py',
'mempool_limit.py',
'rpc_txoutproof.py',
'wallet_listreceivedby.py',
'wallet_abandonconflict.py',
'feature_csv_activation.py',
'rpc_rawtransaction.py',
'wallet_address_types.py',
'feature_bip68_sequence.py',
'p2p_feefilter.py',
'feature_reindex.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
'interface_zmq.py',
'interface_bitcoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'tool_wallet.py',
'wallet_txn_clone.py',
'wallet_txn_clone.py --segwit',
'rpc_getchaintips.py',
'rpc_misc.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'wallet_multiwallet.py',
'wallet_multiwallet.py --usecli',
'wallet_createwallet.py',
'wallet_createwallet.py --usecli',
'interface_http.py',
'interface_rpc.py',
'rpc_psbt.py',
'rpc_users.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'wallet_groups.py',
'p2p_disconnect_ban.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
'rpc_net.py',
'wallet_keypool.py',
'p2p_mempool.py',
'p2p_blocksonly.py',
'mining_prioritisetransaction.py',
'p2p_invalid_locator.py',
'p2p_invalid_block.py',
'p2p_invalid_messages.py',
'p2p_invalid_tx.py',
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
'wallet_txn_clone.py --mineblock',
'feature_notifications.py',
'rpc_invalidateblock.py',
'feature_rbf.py',
'mempool_packages.py',
'rpc_createmultisig.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py',
'p2p_leak_tx.py',
'rpc_signmessage.py',
'wallet_balance.py',
'feature_nulldummy.py',
'mempool_accept.py',
'wallet_import_rescan.py',
'wallet_import_with_label.py',
'rpc_bind.py --ipv4',
'rpc_bind.py --ipv6',
'rpc_bind.py --nonloopback',
'mining_basic.py',
'wallet_bumpfee.py',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
'p2p_leak.py',
'wallet_encryption.py',
'wallet_scriptaddress2.py',
'feature_dersig.py',
'feature_cltv.py',
'rpc_uptime.py',
'wallet_resendwallettransactions.py',
'wallet_fallbackfee.py',
'feature_minchainwork.py',
'rpc_getblockstats.py',
'wallet_create_tx.py',
'p2p_fingerprint.py',
'feature_uacomment.py',
'wallet_coinbase_category.py',
'feature_filelock.py',
'p2p_unrequested_blocks.py',
'feature_includeconf.py',
'rpc_deriveaddresses.py',
'rpc_deriveaddresses.py --usecli',
'rpc_scantxoutset.py',
'feature_logging.py',
'p2p_node_network_limited.py',
'feature_blocksdir.py',
'feature_config_args.py',
'rpc_help.py',
'feature_help.py',
'feature_shutdown.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_pruning.py',
'feature_dbcrash.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, metavar='n', help='On failure, print a log (of length n lines) to the console, combined from the test framework and all test nodes.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--ci', action='store_true', help='Run checks and code that are usually only enabled in a continuous integration environment')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print dots, results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile, encoding="utf8"))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/test_runner_Ł_🏃_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if not enable_bitcoind:
print("No functional tests to run.")
print("Rerun ./configure with --with-daemon and then make")
sys.exit(0)
# Build list of tests
test_list = []
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [test + ".py" if ".py" not in test else test for test in tests]
for test in tests:
if test in ALL_SCRIPTS:
test_list.append(test)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
elif args.extended:
# Include extended tests
test_list += ALL_SCRIPTS
else:
# Run base tests only
test_list += BASE_SCRIPTS
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
exclude_tests = [test.split('.py')[0] for test in args.exclude.split(',')]
for exclude_test in exclude_tests:
# Remove <test_name>.py and <test_name>.py --arg from the test list
exclude_list = [test for test in test_list if test.split('.py')[0] == exclude_test]
for exclude_item in exclude_list:
test_list.remove(exclude_item)
if not exclude_list:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
sys.exit(0)
check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci)
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(
test_list=test_list,
src_dir=config["environment"]["SRCDIR"],
build_dir=config["environment"]["BUILDDIR"],
tmpdir=tmpdir,
jobs=args.jobs,
enable_coverage=args.coverage,
args=passon_args,
combined_logs_len=args.combinedlogslen,
failfast=args.failfast,
runs_ci=args.ci,
)
def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, runs_ci):
args = args or []
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "barrelcrudecoind"]) is not None:
print("%sWARNING!%s There is already a barrelcrudecoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
tests_dir = src_dir + '/test/functional/'
flags = ['--cachedir={}'.format(cache_dir)] + args
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
#Run Tests
job_queue = TestHandler(
num_tests_parallel=jobs,
tests_dir=tests_dir,
tmpdir=tmpdir,
test_list=test_list,
flags=flags,
timeout_duration=40 * 60 if runs_ci else float('inf'), # in seconds
)
start_time = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
test_count = len(test_list)
for i in range(test_count):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
if test_result.status == "Passed":
logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
elif test_result.status == "Skipped":
logging.debug("%s skipped" % (done_str))
else:
print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
if BOLD[0]:
combined_logs_args += ['--color']
combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
if failfast:
logging.debug("Early exiting after test failure")
break
print_results(test_results, max_len_name, (int(time.time() - start_time)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
# This will be a no-op unless failfast is True in which case there may be dangling
# processes which need to be killed.
job_queue.kill_and_join()
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
results += RED[1]
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
if not all_passed:
results += RED[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, *, num_tests_parallel, tests_dir, tmpdir, test_list, flags, timeout_duration):
assert num_tests_parallel >= 1
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.timeout_duration = timeout_duration
self.test_list = test_list
self.flags = flags
self.num_running = 0
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
test = self.test_list.pop(0)
portseed = len(self.test_list)
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = test.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((test,
time.time(),
subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
dot_count = 0
while True:
# Return first proc that finishes
time.sleep(.5)
for job in self.jobs:
(name, start_time, proc, testdir, log_out, log_err) = job
if int(time.time() - start_time) > self.timeout_duration:
# In travis, timeout individual tests (to stop tests hanging and not providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(job)
clearline = '\r' + (' ' * dot_count) + '\r'
print(clearline, end='', flush=True)
dot_count = 0
return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
print('.', end='', flush=True)
dot_count += 1
def kill_and_join(self):
"""Send SIGKILL to all jobs and block until all have ended."""
procs = [i[2] for i in self.jobs]
for proc in procs:
proc.kill()
for proc in procs:
proc.wait()
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def sort_key(self):
if self.status == "Passed":
return 0, self.name.lower()
elif self.status == "Failed":
return 2, self.name.lower()
elif self.status == "Skipped":
return 1, self.name.lower()
def __repr__(self):
if self.status == "Passed":
color = GREEN
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that test scripts start with one of the allowed name prefixes."""
good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if bad_script_names:
print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
raise AssertionError("Some tests are not following naming convention!")
def check_script_list(*, src_dir, fail_on_warn):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by pull-tester.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if fail_on_warn:
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `barrelcrudecoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % command) for command in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r', encoding="utf8") as coverage_ref_file:
all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])
for root, _, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r', encoding="utf8") as coverage_file:
covered_cmds.update([line.strip() for line in coverage_file.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
|
the-stack_106_15658
|
#!/usr/bin/env python
'''
Example custom dynamic inventory script for Ansible, in Python.
'''
import os
import sys
import argparse
try:
import json
except ImportError:
import simplejson as json
class ExampleInventory(object):
def __init__(self):
self.inventory = {}
self.read_cli_args()
# Called with `--list`.
if self.args.list:
self.inventory = self.example_inventory()
# Called with `--host [hostname]`.
elif self.args.host:
            # Not implemented, since we return _meta info with `--list`.
self.inventory = self.empty_inventory()
# If no groups or vars are present, return an empty inventory.
else:
self.inventory = self.empty_inventory()
print(json.dumps(self.inventory))
# Example inventory for testing.
def example_inventory(self):
return {
'group': {
'hosts': ['164.132.182.4'],
'vars': {
'ansible_ssh_user': 'centos',
'ansible_ssh_private_key_file':
'~/.ssh/id_rsa',
'example_variable': 'value'
}
},
'_meta': {
'hostvars': {
'172.81.177.239': {
'host_specific_var': 'apollo'
},
'51.255.211.226': {
'host_specific_var': 'user3'
},
'51.255.211.177': {
'host_specific_var': 'v3'
},
'137.74.107.162': {
'host_specific_var': 'v4'
},
'137.74.85.235': {
'host_specific_var': 'v5'
},
'51.255.211.205': {
'host_specific_var': 'v6'
}
}
}
}
# Empty inventory for testing purpose.
def empty_inventory(self):
return {'_meta': {'hostvars': {}}}
# Read the command line args passed to the script.
def read_cli_args(self):
parser = argparse.ArgumentParser()
parser.add_argument('--list', action = 'store_true')
parser.add_argument('--host', action = 'store')
self.args = parser.parse_args()
# Get the inventory.
ExampleInventory()
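# Usage sketch (the file name "inventory.py" is an assumption): Ansible runs this
# executable inventory with --list for the full inventory and --host <name> for
# per-host variables, for example:
#   ./inventory.py --list
#   ./inventory.py --host 164.132.182.4
#   ansible all -i inventory.py -m ping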
|
the-stack_106_15661
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import pathlib
import random
import numpy as np
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.logging import TestTubeLogger
from torch.nn import functional as F
from torch.optim import RMSprop
from .common.args import Args
from .common.subsample import create_mask_for_mask_type
from .data import transforms
from .mri_model import MRIModel
from .unet_model import UnetModel
class DataTransform:
"""
Data Transformer for training U-Net models.
"""
def __init__(self, resolution, which_challenge, mask_func=None, use_seed=True):
"""
Args:
mask_func (common.subsample.MaskFunc): A function that can create a mask of
appropriate shape.
resolution (int): Resolution of the image.
which_challenge (str): Either "singlecoil" or "multicoil" denoting the dataset.
use_seed (bool): If true, this class computes a pseudo random number generator seed
from the filename. This ensures that the same mask is used for all the slices of
a given volume every time.
"""
if which_challenge not in ('singlecoil', 'multicoil'):
            raise ValueError('Challenge should either be "singlecoil" or "multicoil"')
self.mask_func = mask_func
self.resolution = resolution
self.which_challenge = which_challenge
self.use_seed = use_seed
def __call__(self, kspace, target, attrs, fname, slice):
"""
Args:
kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil
data or (rows, cols, 2) for single coil data.
target (numpy.array): Target image
attrs (dict): Acquisition related information stored in the HDF5 object.
fname (str): File name
slice (int): Serial number of the slice.
        Returns:
            (tuple): tuple containing:
                image (torch.Tensor): Zero-filled input image.
                target (torch.Tensor): Target image converted to a torch Tensor.
                mean (float): Mean value used for normalization.
                std (float): Standard deviation value used for normalization.
                fname (str): File name.
                slice (int): Serial number of the slice.
        """
kspace = transforms.to_tensor(kspace)
# Apply mask
if self.mask_func:
seed = None if not self.use_seed else tuple(map(ord, fname))
masked_kspace, mask = transforms.apply_mask(kspace, self.mask_func, seed)
else:
masked_kspace = kspace
# Inverse Fourier Transform to get zero filled solution
image = transforms.ifft2(masked_kspace)
# Crop input image to given resolution if larger
smallest_width = min(self.resolution, image.shape[-2])
smallest_height = min(self.resolution, image.shape[-3])
if target is not None:
smallest_width = min(smallest_width, target.shape[-1])
smallest_height = min(smallest_height, target.shape[-2])
crop_size = (smallest_height, smallest_width)
image = transforms.complex_center_crop(image, crop_size)
# Absolute value
image = transforms.complex_abs(image)
# Apply Root-Sum-of-Squares if multicoil data
if self.which_challenge == 'multicoil':
image = transforms.root_sum_of_squares(image)
# Normalize input
image, mean, std = transforms.normalize_instance(image, eps=1e-11)
image = image.clamp(-6, 6)
# Normalize target
if target is not None:
target = transforms.to_tensor(target)
target = transforms.center_crop(target, crop_size)
target = transforms.normalize(target, mean, std, eps=1e-11)
target = target.clamp(-6, 6)
else:
target = torch.Tensor([0])
return image, target, mean, std, fname, slice
class UnetMRIModel(MRIModel):
def __init__(self, hparams):
super().__init__(hparams)
self.unet = UnetModel(
in_chans=1,
out_chans=1,
chans=hparams.num_chans,
num_pool_layers=hparams.num_pools,
drop_prob=hparams.drop_prob
)
def forward(self, input):
return self.unet(input.unsqueeze(1)).squeeze(1)
def training_step(self, batch, batch_idx):
input, target, mean, std, _, _ = batch
output = self.forward(input)
loss = F.l1_loss(output, target)
logs = {'loss': loss.item()}
return dict(loss=loss, log=logs)
def validation_step(self, batch, batch_idx):
input, target, mean, std, fname, slice = batch
output = self.forward(input)
mean = mean.unsqueeze(1).unsqueeze(2)
std = std.unsqueeze(1).unsqueeze(2)
return {
'fname': fname,
'slice': slice,
'output': (output * std + mean).cpu().numpy(),
'target': (target * std + mean).cpu().numpy(),
'val_loss': F.l1_loss(output, target),
}
def test_step(self, batch, batch_idx):
input, _, mean, std, fname, slice = batch
output = self.forward(input)
mean = mean.unsqueeze(1).unsqueeze(2)
std = std.unsqueeze(1).unsqueeze(2)
return {
'fname': fname,
'slice': slice,
'output': (output * std + mean).cpu().numpy(),
}
def configure_optimizers(self):
optim = RMSprop(self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay)
scheduler = torch.optim.lr_scheduler.StepLR(optim, self.hparams.lr_step_size, self.hparams.lr_gamma)
return [optim], [scheduler]
def train_data_transform(self):
mask = create_mask_for_mask_type(self.hparams.mask_type, self.hparams.center_fractions,
self.hparams.accelerations)
return DataTransform(self.hparams.resolution, self.hparams.challenge, mask, use_seed=False)
def val_data_transform(self):
mask = create_mask_for_mask_type(self.hparams.mask_type, self.hparams.center_fractions,
self.hparams.accelerations)
return DataTransform(self.hparams.resolution, self.hparams.challenge, mask)
def test_data_transform(self):
return DataTransform(self.hparams.resolution, self.hparams.challenge)
@staticmethod
def add_model_specific_args(parser):
parser.add_argument('--num-pools', type=int, default=4, help='Number of U-Net pooling layers')
parser.add_argument('--drop-prob', type=float, default=0.0, help='Dropout probability')
parser.add_argument('--num-chans', type=int, default=32, help='Number of U-Net channels')
parser.add_argument('--batch-size', default=16, type=int, help='Mini batch size')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
parser.add_argument('--lr-step-size', type=int, default=40,
help='Period of learning rate decay')
parser.add_argument('--lr-gamma', type=float, default=0.1,
help='Multiplicative factor of learning rate decay')
parser.add_argument('--weight-decay', type=float, default=0.,
help='Strength of weight decay regularization')
        parser.add_argument('--mask_type', default='random')
return parser
def create_trainer(args, logger):
return Trainer(
#num_nodes=1,
logger=logger,
default_save_path=args.exp_dir,
checkpoint_callback=True,
max_nb_epochs=args.num_epochs,
gpus=args.gpus,
distributed_backend='ddp',
check_val_every_n_epoch=1,
val_check_interval=1.,
early_stop_callback=False
)
def main(args):
if args.mode == 'train':
load_version = 0 if args.resume else None
logger = TestTubeLogger(save_dir=args.exp_dir, name=args.exp, version=load_version)
trainer = create_trainer(args, logger)
model = UnetMRIModel(args)
trainer.fit(model)
else: # args.mode == 'test'
assert args.checkpoint is not None
model = UnetMRIModel.load_from_checkpoint(str(args.checkpoint))
model.hparams.sample_rate = 1.
trainer = create_trainer(args, logger=False)
trainer.test(model)
if __name__ == '__main__':
parser = Args()
parser.add_argument('--mode', choices=['train', 'test'], default='train')
parser.add_argument('--num-epochs', type=int, default=50, help='Number of training epochs')
parser.add_argument('--gpus', type=int, default=1)
parser.add_argument('--exp-dir', type=pathlib.Path, default='experiments',
help='Path where model and results should be saved')
parser.add_argument('--exp', type=str, help='Name of the experiment')
parser.add_argument('--checkpoint', type=pathlib.Path,
help='Path to pre-trained model. Use with --mode test')
parser.add_argument('--resume', action='store_true',
help='If set, resume the training from a previous model checkpoint. ')
parser = UnetMRIModel.add_model_specific_args(parser)
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
main(args)
|
the-stack_106_15663
|
import subprocess
from unittest.mock import MagicMock
import pytest
from briefcase.exceptions import BriefcaseCommandError, InvalidDeviceError
from briefcase.integrations.android_sdk import ADB
def test_install_apk(mock_sdk, capsys):
"""Invoking `install_apk()` calls `run()` with the appropriate
parameters."""
# Mock out the run command on an adb instance
adb = ADB(mock_sdk, "exampleDevice")
adb.run = MagicMock(return_value="example normal adb output")
# Invoke install
adb.install_apk("example.apk")
# Validate call parameters.
adb.run.assert_called_once_with("install", "-r", "example.apk")
# Validate that the normal output of the command was not printed (since there
# was no error).
assert "normal adb output" not in capsys.readouterr()
def test_install_failure(mock_sdk, capsys):
"""If `install_apk()` fails, an error is raised."""
# Mock out the run command on an adb instance
adb = ADB(mock_sdk, "exampleDevice")
adb.run = MagicMock(
side_effect=subprocess.CalledProcessError(returncode=2, cmd="install")
)
# Invoke install
with pytest.raises(BriefcaseCommandError):
adb.install_apk("example.apk")
# Validate call parameters.
adb.run.assert_called_once_with("install", "-r", "example.apk")
def test_invalid_device(mock_sdk, capsys):
"""Invoking `install_apk()` on an invalid device raises an error."""
# Mock out the run command on an adb instance
adb = ADB(mock_sdk, "exampleDevice")
adb.run = MagicMock(side_effect=InvalidDeviceError("device", "exampleDevice"))
# Invoke install
with pytest.raises(InvalidDeviceError):
adb.install_apk("example.apk")
# Validate call parameters.
adb.run.assert_called_once_with("install", "-r", "example.apk")
|
the-stack_106_15665
|
from typing import Any, Callable
import unittest
from tempfile import TemporaryDirectory
from uuid import uuid4
import logging
import numpy as np
import torch
from rastervision.core.data import (
ClassConfig, DatasetConfig, RasterioSourceConfig, MultiRasterSourceConfig,
SubRasterSourceConfig, ReclassTransformerConfig, SceneConfig,
SemanticSegmentationLabelSourceConfig)
from rastervision.core.rv_pipeline import SemanticSegmentationConfig
from rastervision.pytorch_backend import PyTorchSemanticSegmentationConfig
from rastervision.pytorch_learner import (
SemanticSegmentationModelConfig, SolverConfig,
SemanticSegmentationGeoDataConfig, PlotOptions, GeoDataWindowConfig)
from rastervision.pytorch_learner.utils import (
serialize_albumentation_transform)
from tests.data_files.lambda_transforms import lambda_transforms
from tests import data_file_path
def make_scene(num_channels: int, num_classes: int) -> SceneConfig:
path = data_file_path('multi_raster_source/const_100_600x600.tiff')
rs_cfgs_img = []
for _ in range(num_channels):
rs_cfg = RasterioSourceConfig(
uris=[path],
channel_order=[0],
transformers=[
ReclassTransformerConfig(
mapping={100: np.random.randint(0, 256)})
])
rs_cfgs_img.append(rs_cfg)
rs_cfg_img = MultiRasterSourceConfig(
raster_sources=[
SubRasterSourceConfig(raster_source=rs_cfg, target_channels=[i])
for i, rs_cfg in enumerate(rs_cfgs_img)
],
channel_order=list(range(num_channels)))
rs_cfg_label = RasterioSourceConfig(
uris=[path],
channel_order=[0],
transformers=[
ReclassTransformerConfig(
mapping={100: np.random.randint(0, num_classes)})
])
scene_cfg = SceneConfig(
id=str(uuid4()),
raster_source=rs_cfg_img,
label_source=SemanticSegmentationLabelSourceConfig(
raster_source=rs_cfg_label))
return scene_cfg
class TestSemanticSegmentationLearner(unittest.TestCase):
def assertNoError(self, fn: Callable, msg: str = ''):
try:
fn()
except Exception:
self.fail(msg)
def test_learner(self):
self.assertNoError(lambda: self._test_learner(3, None))
self.assertNoError(
lambda: self._test_learner(6, [(0, 1, 2), (3, 4, 5)]))
def _test_learner(self,
num_channels: int,
channel_display_groups: Any,
num_classes: int = 5):
"""Tests whether the learner can be instantiated correctly and
produce plots."""
logging.disable(logging.CRITICAL)
with TemporaryDirectory() as tmp_dir:
class_config = ClassConfig(
names=[f'class_{i}' for i in range(num_classes)])
dataset_cfg = DatasetConfig(
class_config=class_config,
train_scenes=[
make_scene(
num_channels=num_channels, num_classes=num_classes)
for _ in range(4)
],
validation_scenes=[
make_scene(
num_channels=num_channels, num_classes=num_classes)
for _ in range(2)
],
test_scenes=[])
if num_channels == 6:
tf = lambda_transforms['swap']
aug_tf = serialize_albumentation_transform(
tf,
lambda_transforms_path=data_file_path(
'lambda_transforms.py'),
dst_dir=tmp_dir)
else:
aug_tf = None
data_cfg = SemanticSegmentationGeoDataConfig(
scene_dataset=dataset_cfg,
window_opts=GeoDataWindowConfig(size=20, stride=20),
class_names=class_config.names,
class_colors=class_config.colors,
aug_transform=aug_tf,
plot_options=PlotOptions(
channel_display_groups=channel_display_groups),
num_workers=0)
backend_cfg = PyTorchSemanticSegmentationConfig(
data=data_cfg,
model=SemanticSegmentationModelConfig(pretrained=False),
solver=SolverConfig(),
log_tensorboard=False)
pipeline_cfg = SemanticSegmentationConfig(
root_uri=tmp_dir, dataset=dataset_cfg, backend=backend_cfg)
pipeline_cfg.update()
backend = backend_cfg.build(pipeline_cfg, tmp_dir)
learner = backend.learner_cfg.build(tmp_dir, training=True)
learner.plot_dataloaders()
learner.plot_predictions(split='valid')
torch.save(learner.model.state_dict(), learner.last_model_path)
learner.save_model_bundle()
backend.load_model()
if __name__ == '__main__':
unittest.main()
|
the-stack_106_15666
|
#!/usr/bin/env python3
#
# yosys -- Yosys Open SYnthesis Suite
#
# Copyright (C) 2012 Clifford Wolf <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import os, sys, getopt, re
##yosys-sys-path##
from smtio import SmtIo, SmtOpts, MkVcd
from collections import defaultdict
skip_steps = 0
step_size = 1
num_steps = 20
vcdfile = None
cexfile = None
vlogtbfile = None
inconstr = list()
outconstr = None
gentrace = False
tempind = False
dumpall = False
assume_skipped = None
final_only = False
topmod = None
noinfo = False
so = SmtOpts()
def usage():
print("""
yosys-smtbmc [options] <yosys_smt2_output>
-t <num_steps>
-t <skip_steps>:<num_steps>
-t <skip_steps>:<step_size>:<num_steps>
default: skip_steps=0, step_size=1, num_steps=20
-g
generate an arbitrary trace that satisfies
all assertions and assumptions.
-i
instead of BMC run temporal induction
-m <module_name>
name of the top module
--smtc <constr_filename>
read constraints file
--cex <cex_filename>
read cex file as written by ABC's "write_cex -n"
--noinfo
only run the core proof, do not collect and print any
additional information (e.g. which assert failed)
--final-only
only check final constraints, assume base case
--assume-skipped <start_step>
assume asserts in skipped steps in BMC.
no assumptions are created for skipped steps
before <start_step>.
--dump-vcd <vcd_filename>
write trace to this VCD file
(hint: use 'write_smt2 -wires' for maximum
coverage of signals in generated VCD file)
--dump-vlogtb <verilog_filename>
write trace as Verilog test bench
--dump-smtc <constr_filename>
write trace as constraints file
--dump-all
when using -g or -i, create a dump file for each
        step. The character '%' is replaced in all dump
filenames with the step number.
""" + so.helpmsg())
sys.exit(1)
try:
opts, args = getopt.getopt(sys.argv[1:], so.shortopts + "t:igm:", so.longopts +
["final-only", "assume-skipped=", "smtc=", "cex=", "dump-vcd=", "dump-vlogtb=", "dump-smtc=", "dump-all", "noinfo"])
except:
usage()
for o, a in opts:
if o == "-t":
a = a.split(":")
if len(a) == 1:
num_steps = int(a[0])
elif len(a) == 2:
skip_steps = int(a[0])
num_steps = int(a[1])
elif len(a) == 3:
skip_steps = int(a[0])
step_size = int(a[1])
num_steps = int(a[2])
else:
assert 0
elif o == "--assume-skipped":
assume_skipped = int(a)
elif o == "--final-only":
final_only = True
elif o == "--smtc":
inconstr.append(a)
elif o == "--cex":
cexfile = a
elif o == "--dump-vcd":
vcdfile = a
elif o == "--dump-vlogtb":
vlogtbfile = a
elif o == "--dump-smtc":
outconstr = a
elif o == "--dump-all":
dumpall = True
elif o == "--noinfo":
noinfo = True
elif o == "-i":
tempind = True
elif o == "-g":
gentrace = True
elif o == "-m":
topmod = a
elif so.handle(o, a):
pass
else:
usage()
if len(args) != 1:
usage()
constr_final_start = None
constr_asserts = defaultdict(list)
constr_assumes = defaultdict(list)
constr_write = list()
for fn in inconstr:
current_states = None
current_line = 0
with open(fn, "r") as f:
for line in f:
current_line += 1
if line.startswith("#"):
continue
tokens = line.split()
if len(tokens) == 0:
continue
if tokens[0] == "initial":
current_states = set()
if not tempind:
current_states.add(0)
continue
if tokens[0] == "final":
constr_final = True
if len(tokens) == 1:
current_states = set(["final-%d" % i for i in range(0, num_steps+1)])
constr_final_start = 0
elif len(tokens) == 2:
i = int(tokens[1])
assert i < 0
current_states = set(["final-%d" % i for i in range(-i, num_steps+1)])
constr_final_start = -i if constr_final_start is None else min(constr_final_start, -i)
else:
assert 0
continue
if tokens[0] == "state":
current_states = set()
if not tempind:
for token in tokens[1:]:
tok = token.split(":")
if len(tok) == 1:
current_states.add(int(token))
elif len(tok) == 2:
lower = int(tok[0])
if tok[1] == "*":
upper = num_steps
else:
upper = int(tok[1])
for i in range(lower, upper+1):
current_states.add(i)
else:
assert 0
continue
if tokens[0] == "always":
if len(tokens) == 1:
current_states = set(range(0, num_steps+1))
elif len(tokens) == 2:
i = int(tokens[1])
assert i < 0
current_states = set(range(-i, num_steps+1))
else:
assert 0
continue
if tokens[0] == "assert":
assert current_states is not None
for state in current_states:
constr_asserts[state].append(("%s:%d" % (fn, current_line), " ".join(tokens[1:])))
continue
if tokens[0] == "assume":
assert current_states is not None
for state in current_states:
constr_assumes[state].append(("%s:%d" % (fn, current_line), " ".join(tokens[1:])))
continue
if tokens[0] == "write":
constr_write.append(" ".join(tokens[1:]))
continue
if tokens[0] == "logic":
so.logic = " ".join(tokens[1:])
continue
assert 0
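# For reference, a hypothetical constraints file accepted by the parser above
# (signal names are made up; lines starting with '#' are ignored):
#   initial
#   assume (= [reset] true)
#   state 1:*
#   assume (= [reset] false)
#   always -1
#   assert (=> [-1:valid] [ready])
#   final
#   assert [done]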
def get_constr_expr(db, state, final=False, getvalues=False):
if final:
if ("final-%d" % state) not in db:
return ([], [], []) if getvalues else "true"
else:
if state not in db:
return ([], [], []) if getvalues else "true"
netref_regex = re.compile(r'(^|[( ])\[(-?[0-9]+:|)([^\]]*)\](?=[ )]|$)')
def replace_netref(match):
state_sel = match.group(2)
if state_sel == "":
st = state
elif state_sel[0] == "-":
st = state + int(state_sel[:-1])
else:
st = int(state_sel[:-1])
expr = smt.net_expr(topmod, "s%d" % st, smt.get_path(topmod, match.group(3)))
return match.group(1) + expr
expr_list = list()
for loc, expr in db[("final-%d" % state) if final else state]:
actual_expr = netref_regex.sub(replace_netref, expr)
if getvalues:
expr_list.append((loc, expr, actual_expr))
else:
expr_list.append(actual_expr)
if getvalues:
        loc_list, expr_list, actual_expr_list = zip(*expr_list)
        value_list = smt.get_list(actual_expr_list)
return loc_list, expr_list, value_list
if len(expr_list) == 0:
return "true"
if len(expr_list) == 1:
return expr_list[0]
return "(and %s)" % " ".join(expr_list)
smt = SmtIo(opts=so)
if noinfo and vcdfile is None and vlogtbfile is None and outconstr is None:
smt.produce_models = False
def print_msg(msg):
print("%s %s" % (smt.timestamp(), msg))
sys.stdout.flush()
print_msg("Solver: %s" % (so.solver))
with open(args[0], "r") as f:
for line in f:
smt.write(line)
for line in constr_write:
smt.write(line)
if topmod is None:
topmod = smt.topmod
assert topmod is not None
assert topmod in smt.modinfo
if cexfile is not None:
with open(cexfile, "r") as f:
cex_regex = re.compile(r'([^\[@=]+)(\[\d+\])?([^@=]*)(@\d+)=([01])')
for entry in f.read().split():
match = cex_regex.match(entry)
assert match
name, bit, extra_name, step, val = match.group(1), match.group(2), match.group(3), match.group(4), match.group(5)
if extra_name != "":
continue
if name not in smt.modinfo[topmod].inputs:
continue
if bit is None:
bit = 0
else:
bit = int(bit[1:-1])
step = int(step[1:])
val = int(val)
if smt.modinfo[topmod].wsize[name] == 1:
assert bit == 0
smtexpr = "(= [%s] %s)" % (name, "true" if val else "false")
else:
smtexpr = "(= ((_ extract %d %d) [%s]) #b%d)" % (bit, bit, name, val)
# print("cex@%d: %s" % (step, smtexpr))
constr_assumes[step].append((cexfile, smtexpr))
def write_vcd_trace(steps_start, steps_stop, index):
filename = vcdfile.replace("%", index)
print_msg("Writing trace to VCD file: %s" % (filename))
with open(filename, "w") as vcd_file:
vcd = MkVcd(vcd_file)
path_list = list()
for netpath in sorted(smt.hiernets(topmod)):
hidden_net = False
for n in netpath:
if n.startswith("$"):
hidden_net = True
if not hidden_net:
vcd.add_net([topmod] + netpath, smt.net_width(topmod, netpath))
path_list.append(netpath)
for i in range(steps_start, steps_stop):
vcd.set_time(i)
value_list = smt.get_net_bin_list(topmod, path_list, "s%d" % i)
for path, value in zip(path_list, value_list):
vcd.set_net([topmod] + path, value)
vcd.set_time(steps_stop)
def write_vlogtb_trace(steps_start, steps_stop, index):
filename = vlogtbfile.replace("%", index)
print_msg("Writing trace to Verilog testbench: %s" % (filename))
with open(filename, "w") as f:
print("module testbench;", file=f)
print(" reg [4095:0] vcdfile;", file=f)
print(" reg clock = 0, genclock = 1;", file=f)
primary_inputs = list()
clock_inputs = set()
for name in smt.modinfo[topmod].inputs:
if name in ["clk", "clock", "CLK", "CLOCK"]:
clock_inputs.add(name)
width = smt.modinfo[topmod].wsize[name]
primary_inputs.append((name, width))
for name, width in primary_inputs:
if name in clock_inputs:
print(" wire [%d:0] PI_%s = clock;" % (width-1, name), file=f)
else:
print(" reg [%d:0] PI_%s;" % (width-1, name), file=f)
print(" %s UUT (" % topmod, file=f)
print(",\n".join(" .{name}(PI_{name})".format(name=name) for name, _ in primary_inputs), file=f)
print(" );", file=f)
print(" initial begin", file=f)
print(" if ($value$plusargs(\"vcd=%s\", vcdfile)) begin", file=f)
print(" $dumpfile(vcdfile);", file=f)
print(" $dumpvars(0, testbench);", file=f)
print(" end", file=f)
print(" while (genclock) begin", file=f)
print(" #5; clock = 0;", file=f)
print(" #5; clock = 1;", file=f)
print(" end", file=f)
print(" end", file=f)
print(" initial begin", file=f)
regs = sorted(smt.hiernets(topmod, regs_only=True))
regvals = smt.get_net_bin_list(topmod, regs, "s%d" % steps_start)
print(" #1;", file=f)
for reg, val in zip(regs, regvals):
hidden_net = False
for n in reg:
if n.startswith("$"):
hidden_net = True
print(" %sUUT.%s = %d'b%s;" % ("// " if hidden_net else "", ".".join(reg), len(val), val), file=f)
mems = sorted(smt.hiermems(topmod))
for mempath in mems:
abits, width, ports = smt.mem_info(topmod, "s%d" % steps_start, mempath)
mem = smt.mem_expr(topmod, "s%d" % steps_start, mempath)
addr_expr_list = list()
for i in range(steps_start, steps_stop):
for j in range(ports):
addr_expr_list.append(smt.mem_expr(topmod, "s%d" % i, mempath, j))
addr_list = set()
for val in smt.get_list(addr_expr_list):
addr_list.add(smt.bv2int(val))
expr_list = list()
for i in addr_list:
expr_list.append("(select %s #b%s)" % (mem, format(i, "0%db" % abits)))
for i, val in zip(addr_list, smt.get_list(expr_list)):
val = smt.bv2bin(val)
print(" UUT.%s[%d] = %d'b%s;" % (".".join(mempath), i, len(val), val), file=f)
for i in range(steps_start, steps_stop):
pi_names = [[name] for name, _ in primary_inputs if name not in clock_inputs]
pi_values = smt.get_net_bin_list(topmod, pi_names, "s%d" % i)
print(" #1;", file=f)
print(" // state %d" % i, file=f)
if i > 0:
print(" @(posedge clock);", file=f)
for name, val in zip(pi_names, pi_values):
print(" PI_%s <= %d'b%s;" % (".".join(name), len(val), val), file=f)
print(" genclock = 0;", file=f)
print(" end", file=f)
print("endmodule", file=f)
def write_constr_trace(steps_start, steps_stop, index):
filename = outconstr.replace("%", index)
print_msg("Writing trace to constraints file: %s" % (filename))
with open(filename, "w") as f:
primary_inputs = list()
for name in smt.modinfo[topmod].inputs:
width = smt.modinfo[topmod].wsize[name]
primary_inputs.append((name, width))
if steps_start == 0:
print("initial", file=f)
else:
print("state %d" % steps_start, file=f)
regnames = sorted(smt.hiernets(topmod, regs_only=True))
regvals = smt.get_net_list(topmod, regnames, "s%d" % steps_start)
for name, val in zip(regnames, regvals):
print("assume (= [%s] %s)" % (".".join(name), val), file=f)
mems = sorted(smt.hiermems(topmod))
for mempath in mems:
abits, width, ports = smt.mem_info(topmod, "s%d" % steps_start, mempath)
mem = smt.mem_expr(topmod, "s%d" % steps_start, mempath)
addr_expr_list = list()
for i in range(steps_start, steps_stop):
for j in range(ports):
addr_expr_list.append(smt.mem_expr(topmod, "s%d" % i, mempath, j))
addr_list = set((smt.bv2int(val) for val in smt.get_list(addr_expr_list)))
expr_list = list()
for i in addr_list:
expr_list.append("(select %s #b%s)" % (mem, format(i, "0%db" % abits)))
for i, val in zip(addr_list, smt.get_list(expr_list)):
print("assume (= (select [%s] #b%s) %s)" % (".".join(mempath), format(i, "0%db" % abits), val), file=f)
for k in range(steps_start, steps_stop):
print("", file=f)
print("state %d" % k, file=f)
pi_names = [[name] for name, _ in sorted(primary_inputs)]
pi_values = smt.get_net_list(topmod, pi_names, "s%d" % k)
for name, val in zip(pi_names, pi_values):
print("assume (= [%s] %s)" % (".".join(name), val), file=f)
def write_trace(steps_start, steps_stop, index):
if vcdfile is not None:
write_vcd_trace(steps_start, steps_stop, index)
if vlogtbfile is not None:
write_vlogtb_trace(steps_start, steps_stop, index)
if outconstr is not None:
write_constr_trace(steps_start, steps_stop, index)
def print_failed_asserts_worker(mod, state, path):
assert mod in smt.modinfo
if smt.get("(|%s_a| %s)" % (mod, state)) in ["true", "#b1"]:
return
for cellname, celltype in smt.modinfo[mod].cells.items():
print_failed_asserts_worker(celltype, "(|%s_h %s| %s)" % (mod, cellname, state), path + "." + cellname)
for assertfun, assertinfo in smt.modinfo[mod].asserts.items():
if smt.get("(|%s| %s)" % (assertfun, state)) in ["false", "#b0"]:
print_msg("Assert failed in %s: %s" % (path, assertinfo))
def print_failed_asserts(state, final=False):
if noinfo: return
loc_list, expr_list, value_list = get_constr_expr(constr_asserts, state, final=final, getvalues=True)
for loc, expr, value in zip(loc_list, expr_list, value_list):
if smt.bv2int(value) == 0:
print_msg("Assert %s failed: %s" % (loc, expr))
if not final:
print_failed_asserts_worker(topmod, "s%d" % state, topmod)
def print_anyconsts_worker(mod, state, path):
assert mod in smt.modinfo
for cellname, celltype in smt.modinfo[mod].cells.items():
print_anyconsts_worker(celltype, "(|%s_h %s| %s)" % (mod, cellname, state), path + "." + cellname)
for fun, info in smt.modinfo[mod].anyconsts.items():
print_msg("Value for anyconst in %s (%s): %d" % (path, info, smt.bv2int(smt.get("(|%s| %s)" % (fun, state)))))
def print_anyconsts(state):
if noinfo: return
print_anyconsts_worker(topmod, "s%d" % state, topmod)
if tempind:
retstatus = False
skip_counter = step_size
for step in range(num_steps, -1, -1):
smt.write("(declare-fun s%d () |%s_s|)" % (step, topmod))
smt.write("(assert (|%s_u| s%d))" % (topmod, step))
smt.write("(assert (|%s_h| s%d))" % (topmod, step))
smt.write("(assert (not (|%s_is| s%d)))" % (topmod, step))
smt.write("(assert %s)" % get_constr_expr(constr_assumes, step))
if step == num_steps:
smt.write("(assert (not (and (|%s_a| s%d) %s)))" % (topmod, step, get_constr_expr(constr_asserts, step)))
else:
smt.write("(assert (|%s_t| s%d s%d))" % (topmod, step, step+1))
smt.write("(assert (|%s_a| s%d))" % (topmod, step))
smt.write("(assert %s)" % get_constr_expr(constr_asserts, step))
if step > num_steps-skip_steps:
print_msg("Skipping induction in step %d.." % (step))
continue
skip_counter += 1
if skip_counter < step_size:
print_msg("Skipping induction in step %d.." % (step))
continue
skip_counter = 0
print_msg("Trying induction in step %d.." % (step))
if smt.check_sat() == "sat":
if step == 0:
print("%s Temporal induction failed!" % smt.timestamp())
print_anyconsts(num_steps)
print_failed_asserts(num_steps)
write_trace(step, num_steps+1, '%')
elif dumpall:
print_anyconsts(num_steps)
print_failed_asserts(num_steps)
write_trace(step, num_steps+1, "%d" % step)
else:
print("%s Temporal induction successful." % smt.timestamp())
retstatus = True
break
else: # not tempind
step = 0
retstatus = True
while step < num_steps:
smt.write("(declare-fun s%d () |%s_s|)" % (step, topmod))
smt.write("(assert (|%s_u| s%d))" % (topmod, step))
smt.write("(assert (|%s_h| s%d))" % (topmod, step))
smt.write("(assert %s)" % get_constr_expr(constr_assumes, step))
if step == 0:
smt.write("(assert (|%s_i| s0))" % (topmod))
smt.write("(assert (|%s_is| s0))" % (topmod))
else:
smt.write("(assert (|%s_t| s%d s%d))" % (topmod, step-1, step))
smt.write("(assert (not (|%s_is| s%d)))" % (topmod, step))
if step < skip_steps:
if assume_skipped is not None and step >= assume_skipped:
print_msg("Skipping step %d (and assuming pass).." % (step))
smt.write("(assert (|%s_a| s%d))" % (topmod, step))
smt.write("(assert %s)" % get_constr_expr(constr_asserts, step))
else:
print_msg("Skipping step %d.." % (step))
step += 1
continue
last_check_step = step
for i in range(1, step_size):
if step+i < num_steps:
smt.write("(declare-fun s%d () |%s_s|)" % (step+i, topmod))
smt.write("(assert (|%s_u| s%d))" % (topmod, step+i))
smt.write("(assert (|%s_h| s%d))" % (topmod, step+i))
smt.write("(assert (|%s_t| s%d s%d))" % (topmod, step+i-1, step+i))
smt.write("(assert %s)" % get_constr_expr(constr_assumes, step+i))
last_check_step = step+i
if not gentrace:
if not final_only:
if last_check_step == step:
print_msg("Checking asserts in step %d.." % (step))
else:
print_msg("Checking asserts in steps %d to %d.." % (step, last_check_step))
smt.write("(push 1)")
smt.write("(assert (not (and %s)))" % " ".join(["(|%s_a| s%d)" % (topmod, i) for i in range(step, last_check_step+1)] +
[get_constr_expr(constr_asserts, i) for i in range(step, last_check_step+1)]))
if smt.check_sat() == "sat":
print("%s BMC failed!" % smt.timestamp())
print_anyconsts(step)
for i in range(step, last_check_step+1):
print_failed_asserts(i)
write_trace(0, last_check_step+1, '%')
retstatus = False
break
smt.write("(pop 1)")
if (constr_final_start is not None) or (last_check_step+1 != num_steps):
for i in range(step, last_check_step+1):
smt.write("(assert (|%s_a| s%d))" % (topmod, i))
smt.write("(assert %s)" % get_constr_expr(constr_asserts, i))
if constr_final_start is not None:
for i in range(step, last_check_step+1):
if i < constr_final_start:
continue
print_msg("Checking final constraints in step %d.." % (i))
smt.write("(push 1)")
smt.write("(assert %s)" % get_constr_expr(constr_assumes, i, final=True))
smt.write("(assert (not %s))" % get_constr_expr(constr_asserts, i, final=True))
if smt.check_sat() == "sat":
print("%s BMC failed!" % smt.timestamp())
print_anyconsts(i)
print_failed_asserts(i, final=True)
write_trace(0, i+1, '%')
retstatus = False
break
smt.write("(pop 1)")
if not retstatus:
break
else: # gentrace
for i in range(step, last_check_step+1):
smt.write("(assert (|%s_a| s%d))" % (topmod, i))
smt.write("(assert %s)" % get_constr_expr(constr_asserts, i))
print_msg("Solving for step %d.." % (last_check_step))
if smt.check_sat() != "sat":
print("%s No solution found!" % smt.timestamp())
retstatus = False
break
elif dumpall:
print_anyconsts(0)
write_trace(0, last_check_step+1, "%d" % step)
step += step_size
if gentrace:
print_anyconsts(0)
write_trace(0, num_steps, '%')
smt.write("(exit)")
smt.wait()
print_msg("Status: %s" % ("PASSED" if retstatus else "FAILED (!)"))
sys.exit(0 if retstatus else 1)
|
the-stack_106_15668
|
# Fine-tuning your model
# Having trained your model, your next task is to evaluate its performance. In this chapter, you will learn about some of the other metrics available in scikit-learn that will allow you to assess your model's performance in a more nuanced manner. Next, learn to optimize your classification and regression models using hyperparameter tuning.
# Metrics for classification
# In Chapter 1, you evaluated the performance of your k-NN classifier based on its accuracy. However, as Andy discussed, accuracy is not always an informative metric. In this exercise, you will dive more deeply into evaluating the performance of binary classifiers by computing a confusion matrix and generating a classification report.
# You may have noticed in the video that the classification report consisted of three rows, and an additional support column. The support gives the number of samples of the true response that lie in that class - so in the video example, the support was the number of Republicans or Democrats in the test set on which the classification report was computed. The precision, recall, and f1-score columns, then, gave the respective metrics for that particular class.
# Here, you'll work with the PIMA Indians dataset obtained from the UCI Machine Learning Repository. The goal is to predict whether or not a given female patient will contract diabetes based on features such as BMI, age, and number of pregnancies. Therefore, it is a binary classification problem. A target value of 0 indicates that the patient does not have diabetes, while a value of 1 indicates that the patient does have diabetes. As in Chapters 1 and 2, the dataset has been preprocessed to deal with missing values.
# The dataset has been loaded into a DataFrame df and the feature and target variable arrays X and y have been created for you. In addition, sklearn.model_selection.train_test_split and sklearn.neighbors.KNeighborsClassifier have already been imported.
# Your job is to train a k-NN classifier to the data and evaluate its performance by generating a confusion matrix and classification report.
# Import necessary modules
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
# Create training and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4, random_state = 42)
# Instantiate a k-NN classifier: knn
knn = KNeighborsClassifier(n_neighbors = 6)
# Fit the classifier to the training data
knn.fit(X_train, y_train)
# Predict the labels of the test data: y_pred
y_pred = knn.predict(X_test)
# Generate the confusion matrix and classification report
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# Building a logistic regression model
# Time to build your first logistic regression model! As Hugo showed in the video, scikit-learn makes it very easy to try different models, since the Train-Test-Split/Instantiate/Fit/Predict paradigm applies to all classifiers and regressors - which are known in scikit-learn as 'estimators'. You'll see this now for yourself as you train a logistic regression model on exactly the same data as in the previous exercise. Will it outperform k-NN? There's only one way to find out!
# The feature and target variable arrays X and y have been pre-loaded, and train_test_split has been imported for you from sklearn.model_selection.
# Import the necessary modules
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report
# Create training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4, random_state=42)
# Create the classifier: logreg
logreg = LogisticRegression()
# Fit the classifier to the training data
logreg.fit(X_train, y_train)
# Predict the labels of the test set: y_pred
y_pred = logreg.predict(X_test)
# Compute and print the confusion matrix and classification report
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
# Plotting an ROC curve
# Great job in the previous exercise - you now have a new addition to your toolbox of classifiers!
# Classification reports and confusion matrices are great methods to quantitatively evaluate model performance, while ROC curves provide a way to visually evaluate models. As Hugo demonstrated in the video, most classifiers in scikit-learn have a .predict_proba() method which returns the probability of a given sample being in a particular class. Having built a logistic regression model, you'll now evaluate its performance by plotting an ROC curve. In doing so, you'll make use of the .predict_proba() method and become familiar with its functionality.
# Here, you'll continue working with the PIMA Indians diabetes dataset. The classifier has already been fit to the training data and is available as logreg
# Import necessary modules
from sklearn.metrics import roc_curve
# Compute predicted probabilities: y_pred_prob
y_pred_prob = logreg.predict_proba(X_test)[:,1]
# Generate ROC curve values: fpr, tpr, thresholds
fpr, tpr, thresholds = roc_curve(y_test, y_pred_prob)
# Plot ROC curve
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.show()
# AUC computation
# Say you have a binary classifier that in fact is just randomly making guesses. It would be correct approximately 50% of the time, and the resulting ROC curve would be a diagonal line in which the True Positive Rate and False Positive Rate are always equal. The Area under this ROC curve would be 0.5. This is one way in which the AUC, which Hugo discussed in the video, is an informative metric to evaluate a model. If the AUC is greater than 0.5, the model is better than random guessing. Always a good sign!
# In this exercise, you'll calculate AUC scores using the roc_auc_score() function from sklearn.metrics as well as by performing cross-validation on the diabetes dataset.
# X and y, along with training and test sets X_train, X_test, y_train, y_test, have been pre-loaded for you, and a logistic regression classifier logreg has been fit to the training data.
# Import necessary modules
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_auc_score
# Compute predicted probabilities: y_pred_prob
y_pred_prob = logreg.predict_proba(X_test)[:,1]
# Compute and print AUC score
print("AUC: {}".format(roc_auc_score(y_test, y_pred_prob)))
# Compute cross-validated AUC scores: cv_auc
cv_auc = cross_val_score(logreg, X, y, cv=5, scoring='roc_auc')
# Print list of AUC scores
print("AUC scores computed using 5-fold cross-validation: {}".format(cv_auc))
# Hyperparameter tuning with GridSearchCV
# Hugo demonstrated how to tune the n_neighbors parameter of the KNeighborsClassifier() using GridSearchCV on the voting dataset. You will now practice this yourself, but by using logistic regression on the diabetes dataset instead!
# Like the alpha parameter of lasso and ridge regularization that you saw earlier, logistic regression also has a regularization parameter: C. C controls the inverse of the regularization strength, and this is what you will tune in this exercise. A large C can lead to an overfit model, while a small C can lead to an underfit model.
# The hyperparameter space for C has been set up for you. Your job is to use GridSearchCV and logistic regression to find the optimal C in this hyperparameter space. The feature array is available as X and target variable array is available as y.
# You may be wondering why you aren't asked to split the data into training and test sets. Good observation! Here, we want you to focus on the process of setting up the hyperparameter grid and performing grid-search cross-validation. In practice, you will indeed want to hold out a portion of your data for evaluation purposes, and you will learn all about this in the next video!
# Import necessary modules
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
# Setup the hyperparameter grid
c_space = np.logspace(-5, 8, 15)
param_grid = {'C': c_space}
# Instantiate a logistic regression classifier: logreg
logreg = LogisticRegression()
# Instantiate the GridSearchCV object: logreg_cv
logreg_cv = GridSearchCV(logreg, param_grid, cv=5)
# Fit it to the data
logreg_cv.fit(X,y)
# Print the tuned parameters and score
print("Tuned Logistic Regression Parameters: {}".format(logreg_cv.best_params_))
print("Best score is {}".format(logreg_cv.best_score_))
# Hyperparameter tuning with RandomizedSearchCV
# GridSearchCV can be computationally expensive, especially if you are searching over a large hyperparameter space and dealing with multiple hyperparameters. A solution to this is to use RandomizedSearchCV, in which not all hyperparameter values are tried out. Instead, a fixed number of hyperparameter settings is sampled from specified probability distributions. You'll practice using RandomizedSearchCV in this exercise and see how this works.
# Here, you'll also be introduced to a new model: the Decision Tree. Don't worry about the specifics of how this model works. Just like k-NN, linear regression, and logistic regression, decision trees in scikit-learn have .fit() and .predict() methods that you can use in exactly the same way as before. Decision trees have many parameters that can be tuned, such as max_features, max_depth, and min_samples_leaf: This makes it an ideal use case for RandomizedSearchCV.
# As before, the feature array X and target variable array y of the diabetes dataset have been pre-loaded. The hyperparameter settings have been specified for you. Your goal is to use RandomizedSearchCV to find the optimal hyperparameters. Go for it!
# Import necessary modules
from scipy.stats import randint
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import RandomizedSearchCV
# Setup the parameters and distributions to sample from: param_dist
param_dist = {"max_depth": [3, None],
"max_features": randint(1, 9),
"min_samples_leaf": randint(1, 9),
"criterion": ["gini", "entropy"]}
# Instantiate a Decision Tree classifier: tree
tree = DecisionTreeClassifier()
# Instantiate the RandomizedSearchCV object: tree_cv
tree_cv = RandomizedSearchCV(tree, param_dist, cv=5)
# Fit it to the data
tree_cv.fit(X, y)
# Print the tuned parameters and score
print("Tuned Decision Tree Parameters: {}".format(tree_cv.best_params_))
print("Best score is {}".format(tree_cv.best_score_))
# Hold-out set in practice I: Classification
# You will now practice evaluating a model with tuned hyperparameters on a hold-out set. The feature array and target variable array from the diabetes dataset have been pre-loaded as X and y.
# In addition to C, logistic regression has a 'penalty' hyperparameter which specifies whether to use 'l1' or 'l2' regularization. Your job in this exercise is to create a hold-out set, tune the 'C' and 'penalty' hyperparameters of a logistic regression classifier using GridSearchCV on the training set.
# Import necessary modules
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
# Create the hyperparameter grid
c_space = np.logspace(-5, 8, 15)
param_grid = {'C': c_space, 'penalty': ['l1', 'l2']}
# Instantiate the logistic regression classifier: logreg
logreg = LogisticRegression()
# Create train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size =0.4, random_state = 42)
# Instantiate the GridSearchCV object: logreg_cv
logreg_cv = GridSearchCV(logreg, param_grid, cv=5)
# Fit it to the training data
logreg_cv.fit(X_train, y_train)
# Print the optimal parameters and best score
print("Tuned Logistic Regression Parameter: {}".format(logreg_cv.best_params_))
print("Tuned Logistic Regression Accuracy: {}".format(logreg_cv.best_score_))
# Hold-out set in practice II: Regression
# Remember lasso and ridge regression from the previous chapter? Lasso used the L1 penalty to regularize, while ridge used the L2 penalty. There is another type of regularized regression known as the elastic net. In elastic net regularization, the penalty term is a linear combination of the L1 and L2 penalties:
# a∗L1+b∗L2
# In scikit-learn, this term is represented by the 'l1_ratio' parameter: An 'l1_ratio' of 1 corresponds to an L1 penalty, and anything lower is a combination of L1 and L2.
# In this exercise, you will use GridSearchCV to tune the 'l1_ratio' of an elastic net model trained on the Gapminder data. As in the previous exercise, use a hold-out set to evaluate your model's performance.
# Import necessary modules
from sklearn.linear_model import ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV, train_test_split
# Create train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4, random_state = 42)
# Create the hyperparameter grid
l1_space = np.linspace(0, 1, 30)
param_grid = {'l1_ratio': l1_space}
# Instantiate the ElasticNet regressor: elastic_net
elastic_net = ElasticNet()
# Setup the GridSearchCV object: gm_cv
gm_cv = GridSearchCV(elastic_net, param_grid, cv=5)
# Fit it to the training data
gm_cv.fit(X_train, y_train)
# Predict on the test set and compute metrics
y_pred = gm_cv.predict(X_test)
r2 = gm_cv.score(X_test, y_test)
mse = mean_squared_error(y_test, y_pred)
print("Tuned ElasticNet l1 ratio: {}".format(gm_cv.best_params_))
print("Tuned ElasticNet R squared: {}".format(r2))
print("Tuned ElasticNet MSE: {}".format(mse))
|
the-stack_106_15670
|
import base64
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_distances, euclidean_distances
from raymon.profiling.extractors import SimpleExtractor
class KMeansOutlierScorer(SimpleExtractor):
"""
Clusters the data at building time and generates a data novelty score at validation time.
"""
dist_choices = {"euclidean": euclidean_distances, "cosine": cosine_distances}
def __init__(self, k=16, clusters=None, dist="euclidean"):
self._k = None
self._clusters = None
self._dist = None
self.k = k
self.clusters = clusters
self.dist = dist
"""
PROPERTIES
"""
@property
def k(self):
return self._k
@k.setter
def k(self, value):
if isinstance(value, int) and value > 0:
self._k = value
else:
raise ValueError(f"k must be an int > 0, not {value} ({type(value)})")
@property
def clusters(self):
return self._clusters
@clusters.setter
def clusters(self, value):
if value is None:
self._clusters = None
return
nclusters, dim = value.shape
value = value.astype(np.float64)
if isinstance(value, np.ndarray) and nclusters == self._k:
self._clusters = value
else:
raise ValueError(
f"clusters must be an np.ndarray of shape ({self.k}, dim), not {type(value)} ({value.shape})"
)
@property
def dist(self):
if self._dist in self.dist_choices:
return self.dist_choices[self._dist]
else:
raise ValueError(f"Invalid distance specified: {self._dist}")
@dist.setter
def dist(self, value):
if isinstance(value, str) and value in self.dist_choices:
self._dist = value
else:
raise ValueError(f"dist must be str and one of {self.dist_choices.keys()}, not {value}, ({type(value)})")
@property
def dim(self):
return self.clusters.shape[1]
def extract(self, data):
def sum_2closest(distances):
return np.sort(distances, axis=1)[:, :2].sum(axis=1)
if data.shape == (self.dim,):
data = data[None, :]
elif data.shape == (1, self.dim):
pass
else:
raise ValueError(f"data must be of shape {(1, self.dim)} or {(self.dim, )}, not {data.shape}")
pairwise_dist = self.dist(data, self.clusters)
return float(sum_2closest(pairwise_dist))
"""Buildable interface"""
def build(self, data):
data = np.array(data).astype(np.float64)
km = KMeans(n_clusters=self.k)
km.fit(data)
clusters = km.cluster_centers_
self.clusters = clusters
def is_built(self):
return self.clusters is not None and len(self.clusters) == self.k
"""Serializable interface"""
def to_jcr(self):
b64 = base64.b64encode(self.clusters).decode()
shape = self.clusters.shape
diststr = [k for k, v in self.dist_choices.items() if v == self.dist][0]
data = {"clusters": b64, "k": self.k, "dist": diststr}
state = {"class": self.class2str(), "state": data}
return state
@classmethod
def from_jcr(cls, jcr):
k = jcr["k"]
b64 = jcr["clusters"]
dist = jcr["dist"]
clusters = np.frombuffer(base64.decodebytes(b64.encode()), dtype=np.float64).reshape((k, -1))
return cls(k=k, clusters=clusters, dist=dist)
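# A minimal usage sketch (not part of the library): the data below is synthetic
# and k is left at the default-sized value used here arbitrarily. build() fits
# the k-means clusters; extract() then scores a single sample by the summed
# distance to its two closest cluster centers, so shifted samples score higher.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    train = rng.normal(size=(500, 8))  # 500 in-distribution samples, dim=8
    scorer = KMeansOutlierScorer(k=16)
    scorer.build(train)  # fit the clusters on the reference data
    inlier_score = scorer.extract(rng.normal(size=8))
    outlier_score = scorer.extract(rng.normal(size=8) + 10)  # far from the clusters
    print(inlier_score < outlier_score)  # expected to print True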
|
the-stack_106_15672
|
import ray.worker
from ray import profiling
__all__ = ["free", "global_gc"]
def global_gc():
"""Trigger gc.collect() on all workers in the cluster."""
worker = ray.worker.global_worker
worker.core_worker.global_gc()
def memory_summary():
"""Returns a formatted string describing memory usage in the cluster."""
import grpc
from ray.core.generated import node_manager_pb2
from ray.core.generated import node_manager_pb2_grpc
# We can ask any Raylet for the global memory info.
raylet = ray.nodes()[0]
raylet_address = "{}:{}".format(raylet["NodeManagerAddress"],
ray.nodes()[0]["NodeManagerPort"])
channel = grpc.insecure_channel(raylet_address)
stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel)
reply = stub.FormatGlobalMemoryInfo(
node_manager_pb2.FormatGlobalMemoryInfoRequest(), timeout=30.0)
return reply.memory_summary
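# Illustrative usage (assumes a running Ray cluster; the exact output format of
# memory_summary() depends on the Ray version):
#
#   import ray
#   ray.init()
#   print(memory_summary())  # human-readable object store / memory usage report
#   global_gc()              # ask every worker in the cluster to run gc.collect()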
def free(object_refs, local_only=False, delete_creating_tasks=False):
"""Free a list of IDs from the in-process and plasma object stores.
This function is a low-level API which should be used in restricted
scenarios.
If local_only is false, the request will be sent to all object stores.
This method will not return any value to indicate whether the deletion is
successful or not. This function is an instruction to the object store. If
some of the objects are in use, the object stores will delete them later
when the ref count is down to 0.
Examples:
>>> x_id = f.remote()
>>> ray.get(x_id) # wait for x to be created first
>>> free([x_id]) # unpin & delete x globally
Args:
object_refs (List[ObjectRef]): List of object refs to delete.
local_only (bool): Whether only deleting the list of objects in local
object store or all object stores.
delete_creating_tasks (bool): Whether also delete the object creating
tasks.
"""
worker = ray.worker.global_worker
if isinstance(object_refs, ray.ObjectRef):
object_refs = [object_refs]
if not isinstance(object_refs, list):
raise TypeError("free() expects a list of ObjectRef, got {}".format(
type(object_refs)))
# Make sure that the values are object refs.
for object_ref in object_refs:
if not isinstance(object_ref, ray.ObjectRef):
raise TypeError(
"Attempting to call `free` on the value {}, "
"which is not an ray.ObjectRef.".format(object_ref))
worker.check_connected()
with profiling.profile("ray.free"):
if len(object_refs) == 0:
return
worker.core_worker.free_objects(object_refs, local_only,
delete_creating_tasks)
|
the-stack_106_15674
|
import utils
from manager_rest import storage_manager
import manager_rest.manager_exceptions
from constants import (ENTITY_TYPES,
OPERATION_TYPE)
class StepValidator(object):
def __init__(self):
self.sm = storage_manager.get_storage_manager()
def validate(self, dep_update, step):
"""
Validate that an entity id of the provided type exists in the provided blueprint.
Raises an error if the id doesn't exist.
:param dep_update: the deployment update object.
:param step: the deployment update step object
:return: None
"""
validation_mapper = {
ENTITY_TYPES.NODE: self._validate_node,
ENTITY_TYPES.RELATIONSHIP: self._validate_relationship,
ENTITY_TYPES.PROPERTY: self._validate_property,
ENTITY_TYPES.OPERATION: self._validate_operation
}
if step.entity_type in ENTITY_TYPES:
validator = validation_mapper[step.entity_type]
if validator(dep_update, step):
return
raise \
manager_rest.manager_exceptions.UnknownModificationStageError(
"entity type {0} with entity id {1} doesn't exist"
.format(step.entity_type, step.entity_id))
def _validate_relationship(self, dep_update, step):
""" validates relation type entity id
:param dep_update:
:param step: deployment update step
:return:
"""
entity_keys = utils.get_entity_keys(step.entity_id)
if len(entity_keys) < 4:
return False
NODES, source_node_id, RELATIONSHIPS, relationship_index = entity_keys
# assert the index is indeed readable
relationship_index = utils.parse_index(relationship_index)
if not relationship_index:
return
if step.operation == OPERATION_TYPE.REMOVE:
source_node = self.sm.get_node(dep_update.deployment_id,
source_node_id).to_dict()
else:
source_node = utils.get_raw_node(dep_update.blueprint,
source_node_id)
if not source_node or \
len(source_node[RELATIONSHIPS]) < relationship_index:
return
relationship = source_node[RELATIONSHIPS][relationship_index]
target_node_id = relationship['target_id']
if step.operation == OPERATION_TYPE.REMOVE:
return self.sm.get_node(dep_update.deployment_id, target_node_id)
else:
return utils.get_raw_node(dep_update.blueprint, target_node_id)
def _validate_node(self, dep_update, step):
""" validates node type entity id
:param dep_update:
:param step: deployment update step
:return:
"""
NODES, node_id = utils.get_entity_keys(step.entity_id)
if step.operation == OPERATION_TYPE.REMOVE:
return self.sm.get_node(dep_update.deployment_id, node_id)
else:
return utils.get_raw_node(dep_update.blueprint, node_id)
def _validate_property(self, dep_update, step):
property_keys = utils.get_entity_keys(step.entity_id)
if len(property_keys) < 2:
return
NODES, node_id, PROPERTIES = property_keys[:3]
property_id = property_keys[3:]
storage_node = self.sm.get_node(dep_update.deployment_id, node_id)
raw_node = utils.get_raw_node(dep_update.blueprint, node_id)
is_in_old = utils.traverse_object(storage_node.properties, property_id)
is_in_new = utils.traverse_object(raw_node[PROPERTIES], property_id)
if step.operation == OPERATION_TYPE.REMOVE:
return is_in_old
elif step.operation == OPERATION_TYPE.ADD:
return is_in_new
else:
return is_in_old and is_in_new
def _validate_operation(self, dep_update, step):
operation_keys = utils.get_entity_keys(step.entity_id)
if len(operation_keys) < 2:
return
NODES, node_id, operation_host = operation_keys[:3]
operation_id = operation_keys[3:]
base_node = self.sm.get_node(dep_update.deployment_id, node_id)
is_in_old = utils.traverse_object(getattr(base_node, operation_host),
operation_id)
modified_node = utils.get_raw_node(dep_update.blueprint, node_id)
is_in_new = utils.traverse_object(modified_node[operation_host],
operation_id)
if step.operation == OPERATION_TYPE.REMOVE:
return is_in_old
elif step.operation == OPERATION_TYPE.ADD:
return is_in_new
else:
return is_in_old and is_in_new
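# Rough usage sketch (illustrative only; `dep_update` and `step` stand for the
# real deployment-update and step storage objects, of which this validator only
# reads blueprint, deployment_id, entity_type, entity_id and operation):
#
#   validator = StepValidator()
#   validator.validate(dep_update, step)
#   # raises UnknownModificationStageError if the referenced entity is missing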
|
the-stack_106_15676
|
def flatten(l):
result = list()
for i in l:
if isinstance(i,list):
result.extend(flatten(i))
else:
result.append(i)
return result
print(flatten([1,2,[3],[4,[5,6]]]))
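# An alternative sketch using a generator, which yields items lazily instead of
# building an intermediate list at every nesting level (same result as above):
def iflatten(l):
    for i in l:
        if isinstance(i, list):
            # recurse into nested lists and re-yield their flattened items
            yield from iflatten(i)
        else:
            yield i
print(list(iflatten([1, 2, [3], [4, [5, 6]]])))  # [1, 2, 3, 4, 5, 6]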
|
the-stack_106_15678
|
import funcy
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
from dateutil import parser
from tqdm import tqdm
from utils.helpers import *
from utils.plot import plot_joint_distribution
font = {
"size": 30
}
matplotlib.rc("font", **font)
pd.options.mode.chained_assignment = None
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
MOST_RECENT_FILE = sorted(os.listdir(os.path.join(BASE_DIR, "data", "REDCap")))[-1]
REDCAP_FPATH = os.path.join(BASE_DIR, "data", "REDCap", MOST_RECENT_FILE)
SERIES_ID_FPATH = os.path.join(BASE_DIR, "data", "match_redcap_plataforma.csv")
SEGMENTATION_FPATH = os.path.join(BASE_DIR, "data", "inference_df.csv")
get_date_regex = r"ProjetoCOVIDAI_DATA_(?P<data>.*)_\d+.csv"
date_str = re.match(get_date_regex, MOST_RECENT_FILE).group("data")
dataset_date = parser.parse(date_str)
# Normalize name and CPF
df = pd.read_csv(REDCAP_FPATH)
df.nome = df.nome.apply(lambda s: to_normalized_string(s) if pd.notna(s) else s)
df.cpf = df.cpf.apply(lambda v: str(int(v)) if pd.notna(v) else v)
# Fill redcap_repeat_instrument missing data with "dados_pessoais_unico" since these
# rows are not filled automatically by the database
df.redcap_repeat_instrument = df.redcap_repeat_instrument.fillna("dados_pessoais_unico")
# Fill the missing hospitalization date with date of admission to ICU if existent
df.data_admissao_hospitalar = df.data_admissao_hospitalar.fillna(df.data_admissao_uti)
# Calculate length of stay based on hospitalization date and date of discharge or
# date of death
fill_length_of_stay = df.apply(
lambda row: calculate_length_of_stay(
row["data_admissao_hospitalar"],
row["data_alta_hospitalar"],
row["data_obito"]
),
axis=1
)
df.tempo_estadia_hospitalar = df.tempo_estadia_hospitalar.fillna(fill_length_of_stay)
# Calculate the date of discharge from ICU based on the date of admission
# in the ICU and length of stay in the ICU.
df["data_alta_uti"] = df.apply(
lambda row: sum_date_with_interval(
row["data_admissao_uti"],
row["tempo_estadia_uti"]
),
axis=1
)
# Calculate the date of removal of the ventilation based on the date of ventilation
# and the length of ventilation
df["data_remocao_ventilacao"] = df.apply(
lambda row: sum_date_with_interval(
row["data_ventilacao"],
row["tempo_ventilacao_mecanica"]
),
axis=1
)
# Calculate age and body mass index
df["idade"] = df.apply(
lambda row: calculate_age(
row["data_nasc"],
row["data_admissao_hospitalar"],
dataset_date
),
axis=1
)
df["imc"] = df.peso / (df.altura ** 2)
# Some of the rows have the platelet count in a different unit and need to be
# multiplied by 1000
df.plaquetas = df.plaquetas.apply(lambda v: v * 1000 if v < 1000 else v)
############################## Finished processing the ordinary data ##############################
# Here we define variables useful for processing the rest of the data
cols_intermediate_outcomes = [
"data_sepse",
"sepse",
"data_sdra",
"sdra",
"data_falencia_cardiaca",
"falencia_cardiaca",
"data_choque_septico",
"choque_septico",
"data_coagulopatia",
"coagulopatia",
"data_iam",
"iam",
"data_ira",
"ira"
]
cols_personal_data = [
"nome",
"cpf",
"instituicao",
"data_nasc",
"idade",
"sexo",
"altura",
"peso",
"imc",
"alta",
"obito",
"data_admissao_hospitalar",
"data_admissao_uti",
"data_obito",
"data_alta_hospitalar",
"data_alta_uti",
"data_ventilacao",
"data_remocao_ventilacao",
"tempo_estadia_hospitalar",
"tempo_estadia_uti",
"tempo_ventilacao_mecanica"
] + cols_intermediate_outcomes
cols_comorbidities = [
"has",
"ieca_bra",
"dm",
"asma",
"tabagista",
"dpoc",
"cardiopatia",
"irc",
"neoplasia",
"aids",
"neutropenia"
]
cols_respiratory_comorbidities = [
"asma", "tabagista", "dpoc"
]
cols_cardiac_comorbidities = [
"has", "cardiopatia"
]
cols_dates = [
col for col in df.columns
if "data" in col and col not in
cols_personal_data + ["redcap_data_access_group"]
]
identity_map = {
0: 0,
1: 1
}
irc_map = {
1: "negativo",
2: "nao_dialitico",
3: "dialitico"
}
neoplasia_map = {
1: "negativo",
2: "primaria_ou_secundaria",
3: "outras"
}
map_comorbidities = {
"irc": irc_map,
"neoplasia": neoplasia_map
}
# Now we build a separate dataframe for saving personal data.
df_personal_data = df[df.redcap_repeat_instrument == "dados_pessoais_unico"]
# Discriminate patients that were admitted to the hospital and to the ICU. Also, discriminate those that
# were discharged and those who died.
df_personal_data["internacao"] = df_personal_data.data_admissao_hospitalar.notna()
df_personal_data["uti"] = df_personal_data.data_admissao_uti.notna()
df_personal_data["obito"] = df_personal_data.data_obito.notna()
df_personal_data["alta"] = df_personal_data.data_alta_hospitalar.notna()
df_personal_data = df_personal_data[
["record_id"] + cols_personal_data + cols_comorbidities
]
for col in cols_comorbidities:
df_personal_data[col] = df_personal_data[col].map(map_comorbidities.get(col, identity_map))
# Count the number of previous comorbidities each patient has.
df_personal_data["n_comorbidades"] = df_personal_data[cols_comorbidities].apply(count_comorbidities, axis=1)
df_personal_data["n_comorbidades_respiratorias"] = df_personal_data[cols_respiratory_comorbidities].apply(count_comorbidities, axis=1)
df_personal_data["n_comorbidades_cardiacas"] = df_personal_data[cols_cardiac_comorbidities].apply(count_comorbidities, axis=1)
############################## Finished processing the personal data ##############################
# Now we build separate dataframes for saving clinical, treatment, laboratorial, image and confirmatory data.
# Clinical dataframe
cols_clinical = [
"data_dispneia",
"dispneia",
"data_sofa",
"sofa_score",
"data_saturacao_o2",
"saturacao_o2",
"data_saps_3",
"saps_3"
]
df_clinical = df[df.redcap_repeat_instrument == "evolucao_clinica_multiplo"]
df_clinical = df_clinical[["record_id"] + cols_clinical]
# We need separate dataframes for each date. Note that the clinical dataframe has four date columns. We will separate
# the columns accordingly.
df_dispneia = df_clinical[[
"record_id",
"data_dispneia",
"dispneia"
]]
df_sofa = df_clinical[[
"record_id",
"data_sofa",
"sofa_score"
]]
df_saturacao_o2 = df_clinical[[
"record_id",
"data_saturacao_o2",
"saturacao_o2"
]]
df_saps_3 = df_clinical[[
"record_id",
"data_saps_3",
"saps_3"
]]
# Treatment dataframe
cols_treatment = [
"data_ventilacao",
"ventilacao",
"pao2_fio2",
"data_pronacao",
"pronacao",
"data_hemodialise",
"hemodialise"
]
df_treatment = df[df.redcap_repeat_instrument == "evolucao_tratamento_multiplo"]
df_treatment = df_treatment[["record_id"] + cols_treatment]
# Note that the treatment dataframe has three date columns. We will separate the columns accordingly
# just as we did for the clinical dataframe.
df_ventilacao = df_treatment[[
"record_id",
"data_ventilacao",
"ventilacao",
"pao2_fio2"
]]
df_pronacao = df_treatment[[
"record_id",
"data_pronacao",
"pronacao"
]]
df_hemodialise = df_treatment[[
"record_id" ,
"data_hemodialise",
"hemodialise"
]]
# Laboratory results dataframe
cols_laboratory = [
"leucocitos",
"linfocitos",
"neutrofilos",
"tgp",
"creatinina",
"pcr",
"d_dimero",
"il_6",
"plaquetas",
"rni",
"troponina",
"pro_bnp",
"bicarbonato",
"lactato"
]
df_laboratory = df[df.redcap_repeat_instrument == "evolucao_laboratorial_multiplo"]
df_laboratory = df_laboratory[["record_id", "data_resultados_lab"] + cols_laboratory]
# Image dataframe
cols_image = [
"uid_imagem",
"tipo_imagem",
"data_imagem",
"padrao_imagem_rsna",
"score_tc_dir_sup",
"score_tc_dir_med",
"score_tc_dir_inf",
"score_tc_esq_sup",
"score_tc_esq_med",
"score_tc_esq_inf"
]
df_image = df[df.redcap_repeat_instrument == "evolucao_imagem_multiplo"]
df_image.uid_imagem = df_image.uid_imagem.apply(lambda s: s.strip() if pd.notna(s) else s)
df_image = df_image[["record_id", "redcap_repeat_instance"] + cols_image]
df_image = pd.merge(
left=df_personal_data[["record_id", "nome", "data_nasc", "data_admissao_hospitalar", "instituicao"]],
right=df_image,
how="right",
on="record_id",
validate="one_to_many"
)
uids_internados = set(df_image[df_image.data_admissao_hospitalar.notna()].uid_imagem.unique())
# For images, we also have the data retrieved from the deep segmentation model. We need
# to enrich our dataframe with the percentage of healthy lung, the percentages affected by ground-glass
# opacity and consolidation, and the amount of fat in the patient's body.
cols_series_id = [
"record_id",
"redcap_repeat_instance",
"infer_series_id"
]
df_series_id = pd.read_csv(SERIES_ID_FPATH, sep=";")
df_series_id = df_series_id[cols_series_id]
df_series_id = df_series_id.drop_duplicates()
cols_segmentation = [
"UID_Plataforma",
"series_id",
"seg_consolidacao",
"seg_normal",
"seg_vf1",
"seg_vf2",
"seg_vf3",
"volume_pulmao",
"taxa_gordura",
"volume_gordura",
"mediastino"
]
tmp_data = []
df_seg_raw = pd.read_csv(SEGMENTATION_FPATH)
df_seg_raw = df_seg_raw[cols_segmentation]
df_seg_raw = df_seg_raw[df_seg_raw.volume_pulmao >= 1.]
df_seg_raw = pd.merge(left=df_series_id, right=df_seg_raw, left_on="infer_series_id", right_on="series_id", how="right")
# Each TC study might have multiple series. We need to select the one with the largest lung volume, preferring series flagged as mediastino.
grouped = df_seg_raw.groupby("UID_Plataforma")
for uid_imagem, group in grouped:
if any(group.mediastino):
use_group = group[group.mediastino]
else:
use_group = group
sorted_group = use_group.sort_values("volume_pulmao")
tmp_data.append(
dict(sorted_group.iloc[-1])
)
df_seg = pd.DataFrame(tmp_data)
df_seg = df_seg[df_seg.seg_normal.notna()]
df_image = pd.merge(
left=df_image,
right=df_seg,
how="left",
on=["record_id", "redcap_repeat_instance"]
)
df_image[
["record_id", "redcap_repeat_instance", "nome", "data_nasc", "data_admissao_hospitalar", "instituicao"] + cols_image
].to_csv(os.path.join(BASE_DIR, "data", "TC_scans.csv"), index=False)
df_image = df_image.rename({"redcap_repeat_instance": "redcap_repeat_instance_image"})
df_matches = df_image[
(df_image.seg_normal.notna()) & (df_image.data_admissao_hospitalar.notna())
]
df_matches[
["record_id", "data_admissao_hospitalar", "instituicao", "data_imagem", "uid_imagem"]
].to_csv(os.path.join(BASE_DIR, "data", "matches.csv"), index=False)
n_matches = df_matches.uid_imagem.nunique()
print(f"{n_matches} between REDCap and segmentation\n")
# COVID-19 confirmation dataframe
df_confirmation = df[df.redcap_repeat_instrument == "confirmacao_covid_multiplo"]
############################## Finished processing the results data ##############################
# Now we are going to create a dataframe in which each row corresponds to a moment in the patient's stay at the
# hospital. For each date in the patient history, we will update the row with the latest information about
# that patient.
# First, we need to define some helper functions to work on the processing of the data.
def get_group(grouped, key, default_columns):
"""
Gets a group by key from a Pandas Group By object. If the key does not exist, returns an empty
group with the default columns.
"""
if key in grouped.groups:
group = grouped.get_group(key)
else:
group = pd.DataFrame([], columns=default_columns)
return group
def last_register_before_date(registers, date_col, date, default_columns):
"""
Gets the last register before a reference date in a dataframe. If there are no registers before the
date, returns an empty register with the default columns.
"""
registers = registers[registers[date_col].notna()]
registers_before_date = registers[
registers[date_col].apply(parser.parse) <= date
]
if len(registers_before_date) == 0:
registers_before_date = pd.DataFrame([(np.nan for col in default_columns)], columns=default_columns)
last_register = registers_before_date.iloc[-1]
return last_register
# Then, we need to group all the dataframes we built previously by patient.
grouped_dispneia = df_dispneia.groupby("record_id")
grouped_sofa = df_sofa.groupby("record_id")
grouped_saturacao_o2 = df_saturacao_o2.groupby("record_id")
grouped_saps_3 = df_saps_3.groupby("record_id")
grouped_image = df_image.groupby("record_id")
grouped_laboratory = df_laboratory.groupby("record_id")
grouped_ventilacao = df_ventilacao.groupby("record_id")
grouped_pronacao = df_pronacao.groupby("record_id")
grouped_hemodialise = df_hemodialise.groupby("record_id")
# Now we iterate over the personal data dataframe, which has one row per patient.
after_discharge = []
after_death = []
new_rows = []
for i, row in tqdm(df_personal_data.iterrows(), total=len(df_personal_data)):
record_id = row["record_id"]
institution = row["instituicao"]
hospitalization_date = row["data_admissao_hospitalar"]
discharge_date = row["data_alta_hospitalar"]
date_of_death = row["data_obito"]
if pd.notna(date_of_death):
date_of_death = parser.parse(date_of_death)
if pd.notna(discharge_date):
discharge_date = parser.parse(discharge_date)
if pd.notna(hospitalization_date):
hospitalization_date = parser.parse(hospitalization_date)
# Get each group and sort by the date
group_dispneia = get_group(
grouped_dispneia, record_id, df_dispneia.columns
).sort_values("data_dispneia")
group_sofa = get_group(
grouped_sofa, record_id, df_sofa.columns
)
group_saturacao_o2 = get_group(
grouped_saturacao_o2, record_id, df_saturacao_o2.columns
)
group_saps_3 = get_group(
grouped_saps_3, record_id, df_saps_3.columns
)
group_image = get_group(
grouped_image, record_id, df_image.columns
)
group_laboratory = get_group(
grouped_laboratory, record_id, df_laboratory.columns
)
group_ventilacao = get_group(
grouped_ventilacao, record_id, df_ventilacao.columns
)
group_pronacao = get_group(
grouped_pronacao, record_id, df_pronacao.columns
)
group_hemodialise = get_group(
grouped_hemodialise, record_id, df_hemodialise.columns
)
# List the dates available for the patient
patient_dates = set(filter(
pd.notna,
list(group_dispneia.data_dispneia) +
list(group_sofa.data_sofa) +
list(group_saturacao_o2.data_saturacao_o2) +
list(group_saps_3.data_saps_3) +
list(group_image.data_imagem) +
list(group_laboratory.data_resultados_lab) +
list(group_ventilacao.data_ventilacao) +
list(group_pronacao.data_pronacao) +
list(group_hemodialise.data_hemodialise)
))
patient_dates = funcy.lmap(parser.parse, patient_dates)
# Now we iterate over the dates of the patient retrieving the last register for
# each group.
new_patient_rows = []
for date_tmp in patient_dates:
# If the date is after the patient's death or the patient's discharge, we want to ignore
# the register.
if abs(date_tmp.year - dataset_date.year) > 0:
continue
if pd.notna(date_of_death) and date_tmp > date_of_death:
after_death.append(record_id)
continue
if pd.notna(discharge_date) and date_tmp > discharge_date:
after_discharge.append(discharge_date)
continue
last_register_dispneia = last_register_before_date(group_dispneia, "data_dispneia", date_tmp, df_dispneia.columns)
last_register_sofa = last_register_before_date(group_sofa, "data_sofa", date_tmp, df_sofa.columns)
last_register_saturacao_o2 = last_register_before_date(group_saturacao_o2, "data_saturacao_o2", date_tmp, df_saturacao_o2.columns)
last_register_saps_3 = last_register_before_date(group_saps_3, "data_saps_3", date_tmp, df_saps_3.columns)
last_register_image = last_register_before_date(group_image, "data_imagem", date_tmp, df_image.columns)
last_register_laboratory = last_register_before_date(group_laboratory, "data_resultados_lab", date_tmp, df_laboratory.columns)
last_register_pronacao = last_register_before_date(group_pronacao, "data_pronacao", date_tmp, df_pronacao.columns)
last_register_hemodialise = last_register_before_date(group_hemodialise, "data_hemodialise", date_tmp, df_hemodialise.columns)
# Need for mechanical ventilation is one of our target variables. Thus, we do not want to get the last register before the
# current date. We want to know if the patient ever needed mechanical ventilation at any point in time.
ventilacao = group_ventilacao[group_ventilacao.ventilacao == group_ventilacao.ventilacao.max()].sort_values("data_ventilacao", ascending=False)
if len(ventilacao) == 0:
ventilacao = pd.DataFrame([(np.nan for col in group_ventilacao.columns)], columns=group_ventilacao.columns)
ventilacao = ventilacao.iloc[-1]
new_row = {}
new_row.update(row)
new_row.update(dict(last_register_dispneia))
new_row.update(dict(last_register_sofa))
new_row.update(dict(last_register_saturacao_o2))
new_row.update(dict(last_register_saps_3))
new_row.update(dict(last_register_image))
new_row.update(dict(last_register_laboratory))
new_row.update(dict(last_register_pronacao))
new_row.update(dict(last_register_hemodialise))
new_row.update(dict(ventilacao))
new_row["data"] = date_tmp
new_row["record_id"] = record_id
new_row["instituicao"] = institution
new_row["dias_desde_admissao"] = (date_tmp - hospitalization_date).days if pd.notna(hospitalization_date) else np.nan
date_of_outcome = date_of_death if pd.notna(date_of_death) else discharge_date
new_row["dias_antes_desfecho"] = (date_of_outcome - date_tmp).days if pd.notna(date_of_outcome) else np.nan
new_patient_rows.append(new_row)
new_rows.extend(new_patient_rows)
df_final = pd.DataFrame(new_rows)
# We need to calculate some dummy variables for the categorical data.
padrao_rsna_dummies = pd.get_dummies(df_final.padrao_imagem_rsna, prefix="padrao_rsna")
ventilacao_dummies = pd.get_dummies(df_final.ventilacao, prefix="ventilacao")
neoplasia_dummies = pd.get_dummies(df_final.neoplasia, prefix="neoplasia")
irc_dummies = pd.get_dummies(df_final.irc, prefix="irc")
sexo_dummies = pd.get_dummies(df_final.sexo, prefix="sexo")
df_final = pd.concat([df_final,
padrao_rsna_dummies,
ventilacao_dummies,
neoplasia_dummies,
irc_dummies,
sexo_dummies], axis=1)
def calc_ventilation(row):
if pd.isna(row["ventilacao"]):
return row["ventilacao"]
return row["ventilacao_5.0"] or row["ventilacao_6.0"]
df_final["mechanical_ventilation"] = df_final.apply(calc_ventilation, axis=1)
# df_final.to_csv(os.path.join(BASE_DIR, "data", "covid19_final.csv"), index=False)
# And we want to have a separate file that includes only the data of patients that were hospitalized.
df_internacao = df_final[df_final.data_admissao_hospitalar.notna()].reset_index()
df_internacao.to_csv(os.path.join(BASE_DIR, "data", "covid19_internacao.csv"), index=False)
############################## Statistics ##############################
potentially_elegible = df_final.record_id.nunique()
elegible = df_internacao.record_id.nunique()
still_hospitalized = df_internacao[
(df_internacao.data_alta_hospitalar.isna()) & (df_internacao.data_obito.isna())
].record_id.nunique()
print(f"""
Potentially elegible participants = {potentially_elegible}
Elegible participants = {elegible}
Excluded (not hospitalized) = {potentially_elegible - elegible}
Index test = {elegible - still_hospitalized}
Excluded (still hospitalized) = {still_hospitalized}
""")
#################################### Plot joint distributions ######################################
save_path_joint = os.path.join(BASE_DIR, "desfechos_finais", "joint_normal_lungs_age.tiff")
plot_joint_distribution(df_internacao, save_path_joint, fformat="tiff")
################################### Segmentation vs Radiologist ####################################
plt.figure(figsize=(10, 10))
sum_score = (
df_final.score_tc_esq_sup + df_final.score_tc_esq_med + df_final.score_tc_esq_inf +
df_final.score_tc_dir_sup + df_final.score_tc_dir_med + df_final.score_tc_dir_inf
)
# UNCOMMENT FOR DEBUG
# df_final["sum_score"] = sum_score
# df_final["one_minus_normal"] = 1 - df_final.seg_normal
# df_final = df_final.sort_values("sum_score")
# import pdb
# pdb.set_trace()
corr_coeff = (1 - df_final.seg_normal).corr(sum_score)
corr_coeff_str = ("%.2f" % corr_coeff).lstrip("0")
plt.scatter(sum_score, 1 - df_final.seg_normal, c="royalblue",
label=f"Correlation coefficient = {corr_coeff_str}",
s=df_final.volume_pulmao.apply(lambda x: (2 * x + 1) ** 2),
alpha=0.7)
plt.xlabel("Ragiologist's score")
plt.ylabel("Affected lungs (%)")
props = dict(boxstyle="round", facecolor="snow", alpha=0.4)
textstr = f"Correlation coefficient = {corr_coeff_str}"
plt.text(0.05, 0.87, textstr, verticalalignment="center", bbox=props)
plt.grid(which="major")
plt.grid(which="minor", linestyle='--', alpha=0.4)
plt.minorticks_on()
save_path_corr = os.path.join(BASE_DIR, "exploratory", "rad_score_corr.tiff")
plt.savefig(save_path_corr, format="tiff", dpi=300)
plt.close()
|
the-stack_106_15679
|
from sqlalchemy import func
from sqlalchemy.sql.expression import label
from wtforms.validators import Required
from wikimetrics.utils import thirty_days_ago, today
from wikimetrics.models import Page, Revision, Archive
from wikimetrics.forms.fields import CommaSeparatedIntegerListField, BetterBooleanField
from timeseries_metric import TimeseriesMetric
class NamespaceEdits(TimeseriesMetric):
"""
This class implements namespace edits logic.
An instance of the class is callable and will compute the number of edits
for each user in a passed-in list.
This sql query was used as a starting point for the sqlalchemy query:
select user_id, count(*)
from (select r.rev_user as user_id
from revision r
inner join
page p on p.page_id = r.rev_page
where r.rev_timestamp between [start] and [end]
and r.rev_user in ([parameterized])
and p.page_namespace in ([parameterized])
union all
select a.ar_user as user_id
from archive a
where a.ar_timestamp between [start] and [end]
and a.ar_user in ([parameterized])
and a.ar_namespace in ([parameterized])
)
group by user_id
NOTE: in September 2014, this metric was updated to also count archived revisions.
This is now the default behavior, but it can be turned off via the include_deleted option.
"""
show_in_ui = True
id = 'edits'
label = 'Edits'
category = 'Content'
description = (
'Compute the number of edits in a specific '
'namespace of a mediawiki project'
)
default_result = {
'edits': 0,
}
include_deleted = BetterBooleanField(
default=True,
description='Count revisions made on deleted pages',
)
namespaces = CommaSeparatedIntegerListField(
None,
description='0, 2, 4, etc. (leave blank for *all*)',
)
def __call__(self, user_ids, session):
"""
Parameters:
user_ids : list of mediawiki user ids to find edits for
session : sqlalchemy session open on a mediawiki database
Returns:
dictionary from user ids to the number of edits found.
"""
start_date = self.start_date.data
end_date = self.end_date.data
revisions = session\
.query(
label('user_id', Revision.rev_user),
label('timestamp', Revision.rev_timestamp)
)\
.filter(Revision.rev_timestamp > start_date)\
.filter(Revision.rev_timestamp <= end_date)
archives = session\
.query(
label('user_id', Archive.ar_user),
label('timestamp', Archive.ar_timestamp)
)\
.filter(Archive.ar_timestamp > start_date)\
.filter(Archive.ar_timestamp <= end_date)
if self.namespaces.data and len(self.namespaces.data) > 0:
revisions = revisions.join(Page)\
.filter(Page.page_namespace.in_(self.namespaces.data))
archives = archives\
.filter(Archive.ar_namespace.in_(self.namespaces.data))
revisions = self.filter(revisions, user_ids, column=Revision.rev_user)
archives = self.filter(archives, user_ids, column=Archive.ar_user)
both = revisions
if self.include_deleted.data:
both = both.union_all(archives)
both = both.subquery()
query = session.query(both.c.user_id, func.count())\
.group_by(both.c.user_id)
query = self.apply_timeseries(query, column=both.c.timestamp)
return self.results_by_user(
user_ids,
query,
[(self.id, 1, 0)],
date_index=2,
)
|
the-stack_106_15680
|
import os
from pathlib import Path
from util.constants import *
from typing import Sequence, Dict, Callable
from tqdm import tqdm
from pre_processing.raw_features import read_raw_observations, add_rul
from rul_features.computed_features.frequency import fft_spectrum
from util.helper import concat_dfs
def compute_feature_data_frame(sub_set: str):
"""
Compute all features of all bearings and store them as a CSV
:param sub_set: data set sub set in {LEARNING_SET, TEST_SET, FULL_TEST_SET}
:return: Void, Write CSVs to file_system
"""
feature_list = ALL_FEATURES
data_set_subset_path_in = Path(DATA_SET_PATH).joinpath(sub_set)
data_set_subset_path_out = Path(PROCESSED_DATA_SET_PATH).joinpath(sub_set)
bearings_list = os.listdir(data_set_subset_path_in)
"""
Specify the columns that should be used from the raw data.
"""
types_infos = {
'acc': {'usecols': [0, 1, 2, 3, 4, 5], 'names': ['hour', 'min', 's', 'seg', 'h', 'v']}
}
print("Computing %d features, for %d bearings" % (len(feature_list), len(bearings_list)))
for i in range(len(bearings_list)):
bearing = bearings_list[i]
path_in = data_set_subset_path_in.joinpath(bearing)
path_out = data_set_subset_path_out.joinpath(bearing)
compute_csv_features(path_in=path_in, path_out=path_out,
types_infos=types_infos, feature_list=feature_list, bearing_num=i + 1)
def compute_csv_features(path_in, path_out, types_infos, feature_list: list, bearing_num: int = 0):
"""
:param path_in: Raw data set input path.
:param path_out: Output path where processed data is stored as a csv
:param types_infos: Specifies used columns of raw data
:param feature_list: list of functions that each compute a feature from an observation
:return: Void, writes computed features into file system
"""
for file_type, type_infos in types_infos.items():
all_observations = read_raw_observations(path_in, file_type, type_infos)
all_observation_features = []
with tqdm(range(len(all_observations)), position=0, leave=True) as t:
for i in t:
t.set_description('Computing features for Bearing: %d' % bearing_num)
current_observation = all_observations[i]
all_observation_features = all_observation_features + [
compute_features_from_observation(current_observation,
feature_list, t)]
# Merge the computed features to one bearing data frame
merged_features = pd.DataFrame(all_observation_features)
# Add RUL label
add_rul(merged_features)
# Create processed data set directory
if not os.path.exists(path_out):
Path(path_out).mkdir(parents=True, exist_ok=True)
# Store as .csv
merged_features.to_csv("%s/%s" % (path_out, FEATURES_CSV_NAME), index=False, encoding='utf-8-sig')
def compute_features_from_observation(current_observation: pd.DataFrame, feature_functions: list, pbar: tqdm) -> Dict[
str, float]:
"""
Helper function that computes each feature per observation.
:param current_observation: observation data frame (len=2560)
:param feature_functions: Functions that compute a feature out of the current observation.
:param pbar: tqdm progress bar which postfix is changed depending on the feature being computed
:return: List of computed features.
"""
features = {}
for feature_function in feature_functions:
pbar.set_postfix({'feature': feature_function.__name__})
features['h_' + feature_function.__name__] = feature_function(current_observation, 'h')
features['v_' + feature_function.__name__] = feature_function(current_observation, 'v')
return features
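# For illustration only: a hypothetical feature function compatible with the
# call pattern used above, i.e. feature_function(observation, axis). It is not
# part of ALL_FEATURES; it merely shows the expected signature and return type.
def rms(observation: pd.DataFrame, axis: str) -> float:
    """Root-mean-square of the raw vibration signal on axis 'h' or 'v'."""
    values = observation[axis].astype(float)
    return float((values.pow(2).mean()) ** 0.5)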
def read_feature_dfs_as_dict(data_set_sub_set: str, csv_name=FEATURES_CSV_NAME) -> \
Dict[str, pd.DataFrame]:
"""
Reads all CSVs and compiles them into a data frame of a given sub set and CSV type
:param data_set_sub_set: {FULL_TEST_SET, LEARNING_SET, TEST_SET} from constants.py
:param csv_name: type of features that are to be read
:return: list of read and compiled data frames
"""
path = Path(PROCESSED_DATA_SET_PATH).joinpath(data_set_sub_set)
bearing_list = [name for name in os.listdir(path) if os.path.isdir(os.path.join(path, name))]
bearing_list = sorted(bearing_list)
df_dict = {}
for bearing in tqdm(bearing_list, desc="Reading computed features of bearings from: %s" % csv_name):
df_dict[bearing] = pd.read_csv(Path.joinpath(path, bearing, csv_name))
return df_dict
def read_feature_dfs_as_dataframe(data_set_sub_set: str, csv_name=FEATURES_CSV_NAME) -> (pd.DataFrame, pd.Series):
"""
Reads all CSVs and compiles them into a data frame of a given sub set and CSV type
:param data_set_sub_set: {FULL_TEST_SET, LEARNING_SET, TEST_SET} from constants.py
:param csv_name: type of features that are to be read
:return: list of read and compiled data frames
"""
df_dict = read_feature_dfs_as_dict(data_set_sub_set=data_set_sub_set, csv_name=csv_name)
return df_dict_to_df_dataframe(df_dict)
def df_dict_to_df_dataframe(df_dict: Dict[str, pd.DataFrame]) -> (pd.DataFrame, pd.Series):
data = concat_dfs(df_dict)
labels = data.pop("RUL")
return data, labels
|
the-stack_106_15681
|
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: val4mat.py
#
# Tests: mesh - 2d structured
# plots - pc
#
# Notes
#
# Programmer: Cyrus Harrison
# Date: Tuesday 12, 2008
#
# Modifications:
#
# ----------------------------------------------------------------------------
OpenDatabase(silo_data_path("specmix_quad.silo"))
atts = PseudocolorAttributes()
atts.minFlag = 1
atts.min = 0.0
atts.maxFlag = 1
atts.max = 1.0
SetDefaultPlotOptions(atts)
# view the per material values for each of the 3 materials
DefineScalarExpression("spec_mix", "specmf(Species,1,1)")
AddPlot("Pseudocolor", "spec_mix")
DrawPlots()
Test("specmf_0")
OpenDatabase(silo_data_path("specmix_double_quad.silo"))
AddPlot("Pseudocolor", "spec_mix")
DrawPlots()
Test("specmf_1")
Exit()
|
the-stack_106_15682
|
#!/usr/bin/python3 -i
#
# Copyright (c) 2020 LunarG, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
from base_generator import BaseGenerator, BaseGeneratorOptions, ValueInfo, write
class VulkanReferencedResourceBodyGeneratorOptions(BaseGeneratorOptions):
"""Options for generating a C++ class for detecting unreferenced resource handles in a capture file."""
def __init__(
self,
blacklists=None, # Path to JSON file listing apicalls and structs to ignore.
platform_types=None, # Path to JSON file listing platform (WIN32, X11, etc.) defined types.
filename=None,
directory='.',
prefix_text='',
protect_file=False,
protect_feature=True,
extraVulkanHeaders=[]
):
BaseGeneratorOptions.__init__(
self,
blacklists,
platform_types,
filename,
directory,
prefix_text,
protect_file,
protect_feature,
extraVulkanHeaders=extraVulkanHeaders
)
class VulkanReferencedResourceBodyGenerator(BaseGenerator):
"""VulkanReferencedResourceBodyGenerator - subclass of BaseGenerator.
Generates C++ member definitions for the VulkanReferencedResource class responsible for
determining which resource handles are used or unused in a capture file.
Generate a C++ class for detecting unreferenced resource handles in a capture file.
"""
# All resource and resource associated handle types to be processed.
RESOURCE_HANDLE_TYPES = [
'VkBuffer', 'VkImage', 'VkBufferView', 'VkImageView', 'VkFramebuffer',
'VkDescriptorSet', 'VkCommandBuffer'
]
# Handle types that contain resource and child resource handle types.
CONTAINER_HANDLE_TYPES = ['VkDescriptorSet']
# Handle types that use resource and child resource handle types.
USER_HANDLE_TYPES = ['VkCommandBuffer']
def __init__(
self, err_file=sys.stderr, warn_file=sys.stderr, diag_file=sys.stdout
):
BaseGenerator.__init__(
self,
process_cmds=True,
process_structs=True,
feature_break=False,
err_file=err_file,
warn_file=warn_file,
diag_file=diag_file
)
# Map of Vulkan structs containing handles to a list of values for handle members or struct members
# that contain handles (e.g. VkGraphicsPipelineCreateInfo contains a VkPipelineShaderStageCreateInfo
# member that contains handles).
self.structs_with_handles = dict()
self.pnext_structs = dict(
) # Map of Vulkan structure types to sType value for structs that can be part of a pNext chain.
self.command_info = dict() # Map of Vulkan commands to parameter info
self.restrict_handles = True # Determines if the 'is_handle' override limits the handle test to only the values contained by RESOURCE_HANDLE_TYPES.
def beginFile(self, gen_opts):
"""Method override."""
BaseGenerator.beginFile(self, gen_opts)
write(
'#include "generated/generated_vulkan_referenced_resource_consumer.h"',
file=self.outFile
)
self.newline()
write('#include <cassert>', file=self.outFile)
self.newline()
write('GFXRECON_BEGIN_NAMESPACE(gfxrecon)', file=self.outFile)
write('GFXRECON_BEGIN_NAMESPACE(decode)', file=self.outFile)
def endFile(self):
"""Method override."""
for cmd, info in self.command_info.items():
return_type = info[0]
params = info[2]
if params and params[0].base_type == 'VkCommandBuffer':
# Check for parameters with resource handle types.
handles = self.get_param_list_handles(params[1:])
if (handles):
# Generate a function to add handles to the command buffer's referenced handle list.
cmddef = '\n'
# Temporarily remove resource only matching restriction from is_handle() when generating the function signature.
self.restrict_handles = False
cmddef += self.make_consumer_func_decl(
return_type,
'VulkanReferencedResourceConsumer::Process_' + cmd,
params
) + '\n'
self.restrict_handles = True
cmddef += '{\n'
indent = self.INDENT_SIZE * ' '
# Add unreferenced parameter macros.
unref_count = 0
for param in params[1:]:
if param not in handles:
cmddef += indent + 'GFXRECON_UNREFERENCED_PARAMETER({});\n'.format(
param.name
)
unref_count += 1
if unref_count > 0:
cmddef += '\n'
for index, handle in enumerate(handles):
cmddef += self.track_command_handle(
index, params[0].name, handle, indent=indent
)
cmddef += '}'
write(cmddef, file=self.outFile)
self.newline()
write('GFXRECON_END_NAMESPACE(decode)', file=self.outFile)
write('GFXRECON_END_NAMESPACE(gfxrecon)', file=self.outFile)
# Finish processing in superclass
BaseGenerator.endFile(self)
def genStruct(self, typeinfo, typename, alias):
"""Method override."""
BaseGenerator.genStruct(self, typeinfo, typename, alias)
if not alias:
self.check_struct_member_handles(
typename, self.structs_with_handles
)
# Track this struct if it can be present in a pNext chain.
parent_structs = typeinfo.elem.get('structextends')
if parent_structs:
stype = self.make_structure_type_enum(typeinfo, typename)
if stype:
self.pnext_structs[typename] = stype
def need_feature_generation(self):
"""Indicates that the current feature has C++ code to generate."""
if self.feature_cmd_params:
return True
return False
def generate_feature(self):
"""Performs C++ code generation for the feature."""
for cmd in self.get_filtered_cmd_names():
self.command_info[cmd] = self.feature_cmd_params[cmd]
def is_handle(self, base_type):
"""Override method to check for handle type, only matching resource handle types."""
if self.restrict_handles:
if base_type in self.RESOURCE_HANDLE_TYPES:
return True
return False
else:
return BaseGenerator.is_handle(self, base_type)
def get_param_list_handles(self, values):
"""Create list of parameters that have handle types or are structs that contain handles."""
handles = []
for value in values:
if self.is_handle(value.base_type):
handles.append(value)
elif self.is_struct(
value.base_type
) and (value.base_type in self.structs_with_handles):
handles.append(value)
return handles
def track_command_handle(
self, index, command_param_name, value, value_prefix='', indent=''
):
body = ''
tail = ''
index_name = None
count_name = None
value_name = value_prefix + value.name
is_handle = self.is_handle(value.base_type)
if (
value.is_pointer or value.is_array
) and value.name != 'pnext_value':
if index > 0:
body += '\n'
access_operator = '->'
if not value_prefix:
# If there is no prefix, this is the pointer parameter received by the function, which should never be null.
body += indent + 'assert({} != nullptr);\n'.format(value.name)
body += '\n'
else:
# If there is a prefix, this is a struct member. We need to determine the type of access operator to use
# for the member of a 'decoded' struct type, where handle member types will be HandlePointerDecoder, but
# struct member types will be unique_ptr<StructPointerDecoder>.
if is_handle:
access_operator = '.'
# Add IsNull and HasData checks for the pointer decoder, before accessing its data.
# Note that this does not handle the decoded struct member cases for static arrays, which would need to use '.' instead of '->'.
body += indent + 'if (!{prefix}{name}{op}IsNull() && ({prefix}{name}{op}HasData()))\n'.format(
prefix=value_prefix, name=value.name, op=access_operator
)
body += indent + '{\n'
tail = indent + '}\n' + tail
indent += ' ' * self.INDENT_SIZE
# Get the pointer from the pointer decoder object.
value_name = '{}_ptr'.format(value.name)
if is_handle:
body += indent + 'auto {} = {}{}{}GetPointer();\n'.format(
value_name, value_prefix, value.name, access_operator
)
else:
body += indent + 'auto {} = {}{}{}GetMetaStructPointer();\n'.format(
value_name, value_prefix, value.name, access_operator
)
# Add a for loop for an array of values.
if value.is_array:
index_name = '{}_index'.format(value.name)
count_name = '{}_count'.format(value.name)
body += indent + 'size_t {} = {}{}{}GetLength();\n'.format(
count_name, value_prefix, value.name, access_operator
)
body += indent + 'for (size_t {i} = 0; {i} < {}; ++{i})\n'.format(
count_name, i=index_name
)
body += indent + '{\n'
tail = indent + '}\n' + tail
indent += ' ' * self.INDENT_SIZE
# Insert commands to add handles to a container, or to process struct members that contain handles.
if is_handle:
if value.is_array:
value_name = '{}[{}]'.format(value_name, index_name)
elif value.is_pointer:
value_name = '(*{})'.format(value_name)
if value.base_type in self.CONTAINER_HANDLE_TYPES:
body += indent + 'GetTable().AddContainerToUser({}, {});\n'.format(
command_param_name, value_name
)
elif value.base_type in self.USER_HANDLE_TYPES:
body += indent + 'GetTable().AddUserToUser({}, {});\n'.format(
command_param_name, value_name
)
else:
body += indent + 'GetTable().AddResourceToUser({}, {});\n'.format(
command_param_name, value_name
)
elif self.is_struct(
value.base_type
) and (value.base_type in self.structs_with_handles):
if value.is_array:
access_operator = '[{}].'.format(index_name)
else:
access_operator = '->'
for index, entry in enumerate(
self.structs_with_handles[value.base_type]
):
if entry.name == 'pNext':
ext_structs_with_handles = [
ext_struct for ext_struct in
self.registry.validextensionstructs[value.base_type]
if ext_struct in self.structs_with_handles
]
if ext_structs_with_handles:
body += indent + 'const VkBaseInStructure* pnext_header = nullptr;\n'
body += indent + 'if ({name}->pNext != nullptr)\n'.format(
name=value_name
)
body += indent + '{\n'
indent += ' ' * self.INDENT_SIZE
body += indent + 'pnext_header = reinterpret_cast<const VkBaseInStructure*>({}->pNext->GetPointer());\n'.format(
value_name
)
indent = indent[:-self.INDENT_SIZE]
body += indent + '}\n'
body += indent + 'while (pnext_header)\n'
body += indent + '{\n'
indent += ' ' * self.INDENT_SIZE
body += indent + 'switch (pnext_header->sType)\n'
body += indent + '{\n'
indent += ' ' * self.INDENT_SIZE
body += indent + 'default:\n'
indent += ' ' * self.INDENT_SIZE
body += indent + 'break;\n'
indent = indent[:-self.INDENT_SIZE]
for ext_struct in ext_structs_with_handles:
body += indent + 'case {}:\n'.format(
self.pnext_structs[ext_struct]
)
body += indent + '{\n'
indent += ' ' * self.INDENT_SIZE
body += indent + 'auto pnext_value = reinterpret_cast<const Decoded_{}*>({}->pNext->GetPointer());\n'.format(
ext_struct, value_name
)
body += self.track_command_handle(
index,
command_param_name,
ValueInfo(
'pnext_value', ext_struct,
'const {} *'.format(ext_struct), 1
),
'',
indent=indent
)
body += indent + 'break;\n'
indent = indent[:-self.INDENT_SIZE]
body += indent + '}\n'
indent = indent[:-self.INDENT_SIZE]
body += indent + '}\n'
body += indent + 'pnext_header = pnext_header->pNext;\n'
indent = indent[:-self.INDENT_SIZE]
body += indent + '}\n'
else:
body += self.track_command_handle(
index, command_param_name, entry,
value_name + access_operator, indent
)
return body + tail
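# Simplified, illustrative sketch of the C++ that track_command_handle() emits for
# an array of resource handles (parameter and handle names are hypothetical; the
# null/data checks generated by the code above are omitted here):
#
#   auto pBuffers_ptr = pBuffers->GetPointer();
#   size_t pBuffers_count = pBuffers->GetLength();
#   for (size_t pBuffers_index = 0; pBuffers_index < pBuffers_count; ++pBuffers_index)
#   {
#       GetTable().AddResourceToUser(commandBuffer, pBuffers_ptr[pBuffers_index]);
#   }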
|
the-stack_106_15683
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for HamiltonianMonteCarlo."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import warnings
# Dependency imports
import numpy as np
from scipy import stats
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.mcmc.hmc import _compute_log_acceptance_correction
def _set_seed(seed):
"""Helper which uses graph seed if using eager."""
# TODO(b/68017812): Deprecate once eager correctly supports seed.
if tf.executing_eagerly():
tf.random.set_seed(seed)
return None
return seed
@test_util.test_all_tf_execution_regimes
class HMCTest(test_util.TestCase):
def setUp(self):
self._shape_param = 5.
self._rate_param = 10.
super(HMCTest, self).setUp()
tf.random.set_seed(10003)
np.random.seed(10003)
def assertAllFinite(self, x):
self.assertAllEqual(np.ones_like(x).astype(bool), np.isfinite(x))
def _log_gamma_log_prob(self, x, event_dims=()):
"""Computes log-pdf of a log-gamma random variable.
Args:
x: Value of the random variable.
event_dims: Dimensions not to treat as independent.
Returns:
log_prob: The log-pdf up to a normalizing constant.
"""
return tf.reduce_sum(
self._shape_param * x - self._rate_param * tf.math.exp(x),
axis=event_dims)
def testSampleChainSeedReproducibleWorksCorrectly(self):
num_results = 10
independent_chain_ndims = 1
def log_gamma_log_prob(x):
event_dims = tf.range(independent_chain_ndims, tf.rank(x))
return self._log_gamma_log_prob(x, event_dims)
current_state = np.random.rand(4, 3, 2)
samples0, kernel_results0 = tfp.mcmc.sample_chain(
num_results=2 * num_results,
num_steps_between_results=0,
# Following args are identical to below.
current_state=current_state,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=log_gamma_log_prob,
step_size=0.1,
num_leapfrog_steps=2,
seed=_set_seed(52)),
num_burnin_steps=150,
parallel_iterations=1)
samples1, kernel_results1 = tfp.mcmc.sample_chain(
num_results=num_results,
num_steps_between_results=1,
# Following args are identical to above.
current_state=current_state,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=log_gamma_log_prob,
step_size=0.1,
num_leapfrog_steps=2,
seed=_set_seed(52)),
num_burnin_steps=150,
parallel_iterations=1)
[
samples0_,
samples1_,
target_log_prob0_,
target_log_prob1_,
] = self.evaluate([
samples0,
samples1,
kernel_results0.accepted_results.target_log_prob,
kernel_results1.accepted_results.target_log_prob,
])
self.assertAllClose(samples0_[::2], samples1_,
atol=1e-5, rtol=1e-5)
self.assertAllClose(target_log_prob0_[::2], target_log_prob1_,
atol=1e-5, rtol=1e-5)
def _chain_gets_correct_expectations(self, x, independent_chain_ndims):
counter = collections.Counter()
def log_gamma_log_prob(x):
counter['target_calls'] += 1
event_dims = tf.range(independent_chain_ndims, tf.rank(x))
return self._log_gamma_log_prob(x, event_dims)
samples, kernel_results = tfp.mcmc.sample_chain(
num_results=150,
current_state=x,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=log_gamma_log_prob,
step_size=0.05,
num_leapfrog_steps=2,
seed=_set_seed(42)),
num_burnin_steps=150,
parallel_iterations=1)
if tf.executing_eagerly():
# TODO(b/79991421): Figure out why this is approx twice as many as it
# should be. I.e., `expected_calls = (150 + 150) * 2 + 1`.
expected_calls = 1202
else:
expected_calls = 4
self.assertAllEqual(dict(target_calls=expected_calls), counter)
expected_x = (tf.math.digamma(self._shape_param) - np.log(self._rate_param))
expected_exp_x = self._shape_param / self._rate_param
log_accept_ratio_, samples_, expected_x_ = self.evaluate(
[kernel_results.log_accept_ratio, samples, expected_x])
actual_x = samples_.mean()
actual_exp_x = np.exp(samples_).mean()
acceptance_probs = np.exp(np.minimum(log_accept_ratio_, 0.))
tf1.logging.vlog(
1, 'True E[x, exp(x)]: {}\t{}'.format(expected_x_, expected_exp_x))
tf1.logging.vlog(
1, 'Estimated E[x, exp(x)]: {}\t{}'.format(actual_x, actual_exp_x))
self.assertNear(actual_x, expected_x_, 3e-2)
self.assertNear(actual_exp_x, expected_exp_x, 2e-2)
self.assertAllEqual(np.ones_like(acceptance_probs, np.bool),
acceptance_probs > 0.5)
self.assertAllEqual(np.ones_like(acceptance_probs, np.bool),
acceptance_probs <= 1.)
def _chain_gets_correct_expectations_wrapper(self, independent_chain_ndims):
x = tf.constant(np.random.rand(50, 10, 2), np.float32, name='x')
self._chain_gets_correct_expectations(x, independent_chain_ndims)
def testHMCChainExpectationsNullShape(self):
self._chain_gets_correct_expectations_wrapper(0)
def testHMCChainExpectations1(self):
self._chain_gets_correct_expectations_wrapper(1)
def testHMCChainExpectations2(self):
self._chain_gets_correct_expectations_wrapper(2)
def testKernelResultsUsingTruncatedDistribution(self):
def log_prob(x):
return tf.where(x < 0., tf.constant(-np.inf, x.dtype), -x - x**2)
# This log_prob has the property that it is likely to attract
# the flow toward, and below, zero...but for x <=0,
# log_prob(x) = -inf, which should result in rejection, as well
# as a non-finite log_prob. Thus, this distribution gives us an opportunity
# to test out the kernel results ability to correctly capture rejections due
# to finite AND non-finite reasons.
# Why use a non-constant gradient? This ensures the leapfrog integrator
# will not be exact.
num_results = 1000
# Large step size, will give rejections due to integration error in addition
# to rejection due to going into a region of log_prob = -inf.
step_size = 0.2
num_leapfrog_steps = 5
num_chains = 2
# Start multiple independent chains.
initial_state = tf.convert_to_tensor([0.1] * num_chains)
states, kernel_results = tfp.mcmc.sample_chain(
num_results=num_results,
current_state=initial_state,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=log_prob,
step_size=step_size,
num_leapfrog_steps=num_leapfrog_steps,
seed=_set_seed(42)),
parallel_iterations=1)
states_, kernel_results_ = self.evaluate([states, kernel_results])
pstates_ = kernel_results_.proposed_state
neg_inf_mask = np.isneginf(
kernel_results_.proposed_results.target_log_prob)
# First: Test that the mathematical properties of the above log prob
# function in conjunction with HMC show up as expected in kernel_results_.
# We better have log_prob = -inf some of the time.
self.assertLess(0, neg_inf_mask.sum())
# We better have some rejections due to something other than -inf.
self.assertLess(neg_inf_mask.sum(), (~kernel_results_.is_accepted).sum())
# We better have accepted a decent amount, even near end of the chain.
self.assertLess(
0.1, kernel_results_.is_accepted[int(0.9 * num_results):].mean())
# We better not have any NaNs in states or log_prob.
# We may have some NaN in grads, which involve multiplication/addition due
# to gradient rules. This is the known "NaN grad issue with tf.where."
self.assertAllEqual(
np.zeros_like(states_),
np.isnan(kernel_results_.proposed_results.target_log_prob))
self.assertAllEqual(
np.zeros_like(states_),
np.isnan(states_))
# We better not have any +inf in states, grads, or log_prob.
self.assertAllEqual(
np.zeros_like(states_),
np.isposinf(kernel_results_.proposed_results.target_log_prob))
self.assertAllEqual(
np.zeros_like(states_),
np.isposinf(
kernel_results_.proposed_results.grads_target_log_prob[0]))
self.assertAllEqual(np.zeros_like(states_),
np.isposinf(states_))
# Second: Test that kernel_results is congruent with itself and
# acceptance/rejection of states.
# Proposed state is negative iff proposed target log prob is -inf.
np.testing.assert_array_less(pstates_[neg_inf_mask], 0.)
np.testing.assert_array_less(0., pstates_[~neg_inf_mask])
# Acceptance probs are zero whenever proposed state is negative.
acceptance_probs = np.exp(np.minimum(
kernel_results_.log_accept_ratio, 0.))
self.assertAllEqual(
np.zeros_like(pstates_[neg_inf_mask]),
acceptance_probs[neg_inf_mask])
# The move is accepted ==> state = proposed state.
self.assertAllEqual(
states_[kernel_results_.is_accepted],
pstates_[kernel_results_.is_accepted],
)
# The move was rejected <==> state[t] == state[t - 1].
for t in range(1, num_results):
for i in range(num_chains):
if kernel_results_.is_accepted[t, i]:
self.assertNotEqual(states_[t, i], states_[t - 1, i])
else:
self.assertEqual(states_[t, i], states_[t - 1, i])
def _kernel_leaves_target_invariant(self, initial_draws,
independent_chain_ndims):
def log_gamma_log_prob(x):
event_dims = tf.range(independent_chain_ndims, tf.rank(x))
return self._log_gamma_log_prob(x, event_dims)
def fake_log_prob(x):
"""Cooled version of the target distribution."""
return 1.1 * log_gamma_log_prob(x)
hmc = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=log_gamma_log_prob,
step_size=0.4,
num_leapfrog_steps=5,
seed=_set_seed(43))
sample, kernel_results = hmc.one_step(
current_state=initial_draws,
previous_kernel_results=hmc.bootstrap_results(initial_draws))
bad_hmc = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=fake_log_prob,
step_size=0.4,
num_leapfrog_steps=5,
seed=_set_seed(44))
bad_sample, bad_kernel_results = bad_hmc.one_step(
current_state=initial_draws,
previous_kernel_results=bad_hmc.bootstrap_results(initial_draws))
[
log_accept_ratio_,
bad_log_accept_ratio_,
initial_draws_,
updated_draws_,
fake_draws_,
] = self.evaluate([
kernel_results.log_accept_ratio,
bad_kernel_results.log_accept_ratio,
initial_draws,
sample,
bad_sample,
])
# Confirm step size is small enough that we usually accept.
acceptance_probs = np.exp(np.minimum(log_accept_ratio_, 0.))
bad_acceptance_probs = np.exp(np.minimum(bad_log_accept_ratio_, 0.))
self.assertGreater(acceptance_probs.mean(), 0.5)
self.assertGreater(bad_acceptance_probs.mean(), 0.5)
# Confirm step size is large enough that we sometimes reject.
self.assertLess(acceptance_probs.mean(), 0.99)
self.assertLess(bad_acceptance_probs.mean(), 0.99)
_, ks_p_value_true = stats.ks_2samp(initial_draws_.flatten(),
updated_draws_.flatten())
_, ks_p_value_fake = stats.ks_2samp(initial_draws_.flatten(),
fake_draws_.flatten())
tf1.logging.vlog(
1,
'acceptance rate for true target: {}'.format(acceptance_probs.mean()))
tf1.logging.vlog(
1, 'acceptance rate for fake target: {}'.format(
bad_acceptance_probs.mean()))
tf1.logging.vlog(
1, 'K-S p-value for true target: {}'.format(ks_p_value_true))
tf1.logging.vlog(
1, 'K-S p-value for fake target: {}'.format(ks_p_value_fake))
# Make sure that the MCMC update hasn't changed the empirical CDF much.
self.assertGreater(ks_p_value_true, 1e-3)
# Confirm that targeting the wrong distribution does
# significantly change the empirical CDF.
self.assertLess(ks_p_value_fake, 1e-6)
def _kernel_leaves_target_invariant_wrapper(self, independent_chain_ndims):
"""Tests that the kernel leaves the target distribution invariant.
Draws some independent samples from the target distribution,
applies an iteration of the MCMC kernel, then runs a
Kolmogorov-Smirnov test to determine if the distribution of the
MCMC-updated samples has changed.
We also confirm that running the kernel with a different log-pdf
does change the target distribution. (And that we can detect that.)
Args:
independent_chain_ndims: Python `int` scalar representing the number of
dims associated with independent chains.
"""
initial_draws = np.log(np.random.gamma(self._shape_param,
size=[50000, 2, 2]))
initial_draws -= np.log(self._rate_param)
x = tf.constant(initial_draws, np.float32)
self._kernel_leaves_target_invariant(x, independent_chain_ndims)
def testKernelLeavesTargetInvariant1(self):
self._kernel_leaves_target_invariant_wrapper(1)
def testKernelLeavesTargetInvariant2(self):
self._kernel_leaves_target_invariant_wrapper(2)
def testKernelLeavesTargetInvariant3(self):
self._kernel_leaves_target_invariant_wrapper(3)
def testNanRejection(self):
"""Tests that an update that yields NaN potentials gets rejected.
We run HMC with a target distribution that returns NaN
log-likelihoods if any element of x < 0, and unit-scale
exponential log-likelihoods otherwise. The exponential potential
pushes x towards 0, ensuring that any reasonably large update will
push us over the edge into NaN territory.
"""
def _unbounded_exponential_log_prob(x):
"""An exponential distribution with log-likelihood NaN for x < 0."""
per_element_potentials = tf.where(
x < 0., tf.constant(np.nan, x.dtype), -x)
return tf.reduce_sum(per_element_potentials)
initial_x = tf.linspace(0.01, 5, 10)
hmc = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=_unbounded_exponential_log_prob,
step_size=2.,
num_leapfrog_steps=5,
seed=_set_seed(46))
updated_x, kernel_results = hmc.one_step(
current_state=initial_x,
previous_kernel_results=hmc.bootstrap_results(initial_x))
initial_x_, updated_x_, log_accept_ratio_ = self.evaluate(
[initial_x, updated_x, kernel_results.log_accept_ratio])
acceptance_probs = np.exp(np.minimum(log_accept_ratio_, 0.))
tf1.logging.vlog(1, 'initial_x = {}'.format(initial_x_))
tf1.logging.vlog(1, 'updated_x = {}'.format(updated_x_))
tf1.logging.vlog(1, 'log_accept_ratio = {}'.format(log_accept_ratio_))
self.assertAllEqual(initial_x_, updated_x_)
self.assertEqual(acceptance_probs, 0.)
def testNanFromGradsDontPropagate(self):
"""Test that update with NaN gradients does not cause NaN in results."""
if tf1.control_flow_v2_enabled():
self.skipTest('b/138796859')
if tf.executing_eagerly(): return
def _nan_log_prob_with_nan_gradient(x):
return np.nan * tf.reduce_sum(x)
initial_x = tf.linspace(0.01, 5, 10)
hmc = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=_nan_log_prob_with_nan_gradient,
step_size=2.,
num_leapfrog_steps=5,
seed=_set_seed(47))
updated_x, kernel_results = hmc.one_step(
current_state=initial_x,
previous_kernel_results=hmc.bootstrap_results(initial_x))
initial_x_, updated_x_, log_accept_ratio_ = self.evaluate(
[initial_x, updated_x, kernel_results.log_accept_ratio])
acceptance_probs = np.exp(np.minimum(log_accept_ratio_, 0.))
tf1.logging.vlog(1, 'initial_x = {}'.format(initial_x_))
tf1.logging.vlog(1, 'updated_x = {}'.format(updated_x_))
tf1.logging.vlog(1, 'log_accept_ratio = {}'.format(log_accept_ratio_))
self.assertAllEqual(initial_x_, updated_x_)
self.assertEqual(acceptance_probs, 0.)
self.assertAllEqual([True], [
g is None for g in tf.gradients(
ys=kernel_results.proposed_results.grads_target_log_prob,
xs=initial_x)
])
self.assertAllFinite(
self.evaluate(tf.gradients(ys=updated_x, xs=initial_x)[0]))
# Gradients of the acceptance probs and new log prob are not finite.
# self.assertAllFinite(
# self.evaluate(tf.gradients(acceptance_probs, initial_x)[0]))
# self.assertAllFinite(
# self.evaluate(tf.gradients(new_log_prob, initial_x)[0]))
def _testChainWorksDtype(self, dtype):
states, kernel_results = tfp.mcmc.sample_chain(
num_results=10,
current_state=np.zeros(5).astype(dtype),
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=lambda x: -tf.reduce_sum(x**2., axis=-1),
step_size=0.01,
num_leapfrog_steps=10,
seed=_set_seed(48)),
parallel_iterations=1)
states_, log_accept_ratio_ = self.evaluate(
[states, kernel_results.log_accept_ratio])
self.assertEqual(dtype, states_.dtype)
self.assertEqual(dtype, log_accept_ratio_.dtype)
def testChainWorksIn64Bit(self):
self._testChainWorksDtype(np.float64)
def testChainWorksIn16Bit(self):
self._testChainWorksDtype(np.float16)
def testChainWorksCorrelatedMultivariate(self):
dtype = np.float32
true_mean = dtype([0, 0])
true_cov = dtype([[1, 0.5],
[0.5, 1]])
num_results = 1500
counter = collections.Counter()
def target_log_prob(x, y):
counter['target_calls'] += 1
# Corresponds to unnormalized MVN.
# z = matmul(inv(chol(true_cov)), [x, y] - true_mean)
z = tf.stack([x, y], axis=-1) - true_mean
z = tf.squeeze(
tf.linalg.triangular_solve(
np.linalg.cholesky(true_cov),
z[..., tf.newaxis]),
axis=-1)
return -0.5 * tf.reduce_sum(z**2., axis=-1)
states, kernel_results = tfp.mcmc.sample_chain(
num_results=num_results,
current_state=[dtype(-2), dtype(2)],
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob,
step_size=[1.23, 1.23],
num_leapfrog_steps=2,
seed=_set_seed(54)),
num_burnin_steps=200,
parallel_iterations=1)
if tf.executing_eagerly():
# TODO(b/79991421): Figure out why this is approx twice as many as it
# should be. I.e., `expected_calls = (num_results + 200) * 2 * 2 + 1`.
expected_calls = 6802
else:
expected_calls = 4
self.assertAllEqual(dict(target_calls=expected_calls), counter)
states = tf.stack(states, axis=-1)
self.assertEqual(num_results, tf.compat.dimension_value(states.shape[0]))
sample_mean = tf.reduce_mean(states, axis=0)
x = states - sample_mean
sample_cov = tf.matmul(x, x, transpose_a=True) / dtype(num_results)
[sample_mean_, sample_cov_, is_accepted_] = self.evaluate([
sample_mean, sample_cov, kernel_results.is_accepted])
self.assertNear(0.6, is_accepted_.mean(), err=0.05)
self.assertAllClose(true_mean, sample_mean_,
atol=0.06, rtol=0.)
self.assertAllClose(true_cov, sample_cov_,
atol=0., rtol=0.2)
def testUncalibratedHMCPreservesStaticShape(self):
uncal_hmc = tfp.mcmc.UncalibratedHamiltonianMonteCarlo(
target_log_prob_fn=lambda x: -tf.reduce_sum(x**2., axis=-1),
step_size=0.5,
num_leapfrog_steps=2,
seed=_set_seed(1042))
x0 = tf.constant([[-1., 0.5],
[0., 0.],
[1., 1.25]])
r0 = uncal_hmc.bootstrap_results(x0)
x1, r1 = uncal_hmc.one_step(x0, r0)
self.assertAllEqual([3, 2], x0.shape)
self.assertAllEqual([3], r0.target_log_prob.shape)
self.assertAllEqual([3, 2], x1.shape)
self.assertAllEqual([3], r1.target_log_prob.shape)
def testHMCPreservesStaticShape(self):
hmc = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=lambda x: -tf.reduce_sum(x**2., axis=-1),
step_size=0.5,
num_leapfrog_steps=2,
seed=_set_seed(1042))
x0 = tf.constant([[-1., 0.5],
[0., 0.],
[1., 1.25]])
r0 = hmc.bootstrap_results(x0)
x1, r1 = hmc.one_step(x0, r0)
self.assertAllEqual([3, 2], x0.shape)
self.assertAllEqual([3], r0.accepted_results.target_log_prob.shape)
self.assertAllEqual([3, 2], x1.shape)
self.assertAllEqual([3], r1.accepted_results.target_log_prob.shape)
def testHMCIsCalibrated(self):
hmc = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=lambda x: -tf.square(x) / 2.,
step_size=0.5,
num_leapfrog_steps=2,
)
self.assertTrue(hmc.is_calibrated)
def testUncalibratedHMCIsNotCalibrated(self):
uncal_hmc = tfp.mcmc.UncalibratedHamiltonianMonteCarlo(
target_log_prob_fn=lambda x: -tf.square(x) / 2.,
step_size=0.5,
num_leapfrog_steps=2,
)
self.assertFalse(uncal_hmc.is_calibrated)
def testAdaptiveParameters(self):
hmc = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=lambda x: -x**2.,
step_size=0.5,
num_leapfrog_steps=2,
seed=_set_seed(1042),
store_parameters_in_results=True)
x0 = tf.zeros(2)
r0 = hmc.bootstrap_results(x0)
x1, r1 = hmc.one_step(x0, r0)
r1_zero = r1._replace(
accepted_results=r1.accepted_results._replace(
step_size=tf.constant(0.)))
x2, r2 = hmc.one_step(x1, r1_zero)
r0_, r1_, r2_, x1_, x2_ = self.evaluate([r0, r1, r2, x1, x2])
self.assertAllEqual(0.5, r0_.accepted_results.step_size)
self.assertAllEqual(2, r0_.accepted_results.num_leapfrog_steps)
self.assertAllEqual(0.5, r1_.accepted_results.step_size)
self.assertAllEqual(2, r1_.accepted_results.num_leapfrog_steps)
self.assertAllEqual(0., r2_.accepted_results.step_size)
self.assertAllEqual(2, r2_.accepted_results.num_leapfrog_steps)
# Since step size is 0, we shouldn't have moved despite being accepted.
self.assertAllEqual(x2_, x1_)
self.assertAllEqual([True, True], r2_.is_accepted)
def testWarnMutableParameters(self):
warnings.simplefilter('always')
with warnings.catch_warnings(record=True) as triggered:
tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=lambda x: -x**2.,
num_leapfrog_steps=tf.Variable(2.),
step_size=tf.Variable(0.1),
store_parameters_in_results=False)
self.assertTrue(
any('Please consult the docstring' in str(warning.message)
for warning in triggered))
def testSoftplusCreatedOutsideKernelDoesNotKillGradients(self):
softplus = tfp.bijectors.Softplus()
def target_log_prob_fn(x):
x = softplus.forward(x)
return -tf.reduce_sum(x**2., axis=-1)
hmc = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
step_size=0.5,
num_leapfrog_steps=2,
seed=_set_seed(1042))
x0 = tf.constant([[-1., 0.5], [0., 0.], [1., 1.25]])
# Simply calling hmc.bootstrap_results(x0) used to fail with
# ValueError: Encountered `None` gradient.
r0 = hmc.bootstrap_results(x0)
# Also ensure eval doesn't crash things.
self.evaluate(r0)
class _LogCorrectionTest(object):
def testHandlesNanFromPotential(self):
tlp = [1, np.inf, -np.inf, np.nan]
target_log_prob, proposed_target_log_prob = [
self.dtype(x.flatten()) for x in np.meshgrid(tlp, tlp)]
num_chains = len(target_log_prob)
x0 = np.zeros(num_chains, dtype=self.dtype)
def make_trick_fun(f):
f_x = tf.convert_to_tensor(f)
def _fn(x):
# We'll make the gradient be `1` regardless of input.
return f_x + (x - tf.stop_gradient(x))
return _fn
# Use trick fun to get "current" results.
pkr = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=make_trick_fun(target_log_prob),
step_size=1.,
num_leapfrog_steps=1).bootstrap_results(x0)
# Use trick fun to inject "proposed" results.
_, results = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=make_trick_fun(proposed_target_log_prob),
step_size=1.,
num_leapfrog_steps=1).one_step(x0, pkr)
[actual_log_accept_ratio_, actual_grads_target_log_prob_] = self.evaluate([
results.log_accept_ratio,
results.accepted_results.grads_target_log_prob])
# First log(accept_ratio) is finite, rest are weird so reject them.
self.assertTrue(np.isfinite(actual_log_accept_ratio_[0]))
self.assertAllEqual(self.dtype([-np.inf]*(num_chains - 1)),
actual_log_accept_ratio_[1:])
# Ensure gradient is finite.
self.assertAllEqual(
np.ones_like(actual_grads_target_log_prob_).astype(np.bool),
np.isfinite(actual_grads_target_log_prob_))
def testHandlesNanFromKinetic(self):
if tf.executing_eagerly(): return
x = [1, np.inf, -np.inf, np.nan]
momentums, proposed_momentums = [
[np.reshape(self.dtype(x), [-1, 1])]
for x in np.meshgrid(x, x)]
num_chains = len(momentums[0])
momentums = [tf.convert_to_tensor(momentums[0])]
proposed_momentums = [tf.convert_to_tensor(proposed_momentums[0])]
log_acceptance_correction = _compute_log_acceptance_correction(
momentums,
proposed_momentums,
independent_chain_ndims=1)
grads = tf.gradients(ys=log_acceptance_correction, xs=momentums)
[actual_log_acceptance_correction, grads_] = self.evaluate([
log_acceptance_correction, grads])
# Ensure log_acceptance_correction is `inf` (note: that's positive inf) in
# weird cases and finite otherwise.
expected_log_acceptance_correction = -(
self.dtype([0] + [np.inf]*(num_chains - 1)))
self.assertAllEqual(expected_log_acceptance_correction,
actual_log_acceptance_correction)
# Ensure gradient is finite.
g = grads_[0].reshape([len(x), len(x)])[:, 0]
self.assertAllEqual(np.ones_like(g).astype(np.bool), np.isfinite(g))
# The remaining gradients are nan because the momentum was itself nan or
# inf.
g = grads_[0].reshape([len(x), len(x)])[:, 1:]
self.assertAllEqual(np.ones_like(g).astype(np.bool), np.isnan(g))
@test_util.test_all_tf_execution_regimes
class LogCorrectionTest16(test_util.TestCase, _LogCorrectionTest):
dtype = np.float16
@test_util.test_all_tf_execution_regimes
class LogCorrectionTest32(test_util.TestCase, _LogCorrectionTest):
dtype = np.float32
@test_util.test_all_tf_execution_regimes
class LogCorrectionTest64(test_util.TestCase, _LogCorrectionTest):
dtype = np.float64
class _HMCHandlesLists(object):
def testStateParts(self):
cast = lambda x: np.array(x, self.dtype)
dist_x = tfd.Normal(loc=cast(0), scale=cast(1))
dist_y = tfd.Independent(
tfd.Gamma(concentration=cast([1, 2]),
rate=cast([0.5, 0.75])),
reinterpreted_batch_ndims=1)
def target_log_prob(x, y):
return dist_x.log_prob(x) + dist_y.log_prob(y)
x0 = [dist_x.sample(seed=_set_seed(61)), dist_y.sample(seed=_set_seed(62))]
kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob,
step_size=1.,
num_leapfrog_steps=1,
seed=_set_seed(49))
# We are using bijectors to sample from a transformed density defined on
# an unbounded domain. The samples returned are from the original bounded
# domain.
unconstraining_bijectors = [
tfb.Identity(), # Maps R to R.
tfb.Exp(), # Maps R to a positive real.
]
transformed_kernel = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=kernel, bijector=unconstraining_bijectors)
samples, _ = tfp.mcmc.sample_chain(
num_results=2000,
current_state=x0,
kernel=transformed_kernel,
num_burnin_steps=500,
parallel_iterations=1)
actual_means = [tf.reduce_mean(s, axis=0) for s in samples]
actual_vars = [tf.math.reduce_variance(s, axis=0) for s in samples]
expected_means = [dist_x.mean(), dist_y.mean()]
expected_vars = [dist_x.variance(), dist_y.variance()]
[
actual_means_,
actual_vars_,
expected_means_,
expected_vars_,
] = self.evaluate([
actual_means,
actual_vars,
expected_means,
expected_vars,
])
self.assertAllClose(expected_means_, actual_means_, atol=0.1, rtol=0.16)
self.assertAllClose(expected_vars_, actual_vars_, atol=0., rtol=0.5)
@test_util.test_all_tf_execution_regimes
class HMCHandlesLists32(_HMCHandlesLists, test_util.TestCase):
dtype = np.float32
@test_util.test_all_tf_execution_regimes
class HMCHandlesLists64(_HMCHandlesLists, test_util.TestCase):
dtype = np.float64
@test_util.test_all_tf_execution_regimes
class HMCAdaptiveStepSize(test_util.TestCase):
def setUp(self):
super(HMCAdaptiveStepSize, self).setUp()
tf.random.set_seed(10014)
np.random.seed(10014)
def test_multiple_step_sizes_different_ranks(self):
num_results = 5
# Ignoring adaptation (or assuming that adaptation is performed using
# a wrapper class like SimpleStepSizeAdaptation), test that we can
# construct and run an HMC kernel with state parts and matching per-element
# step sizes of varying rank.
initial_step_sizes = [1e-5, [1e-4, 1e-3]] # Scalar and vector state parts.
initial_state = [0., [0., 0.]]
dtype = np.float32
def target_log_prob_fn(x1, x2):
d = tfd.Normal(dtype(0.), dtype(1.))
return d.log_prob(x1) + tf.reduce_sum(d.log_prob(x2))
samples, _ = tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=0,
current_state=[dtype(x) for x in initial_state],
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
num_leapfrog_steps=2,
step_size=initial_step_sizes,
state_gradients_are_stopped=True,
seed=_set_seed(252)),
parallel_iterations=1)
init_op = tf1.global_variables_initializer()
self.evaluate(init_op)
_ = self.evaluate(samples)
def test_multiple_step_sizes_different_dtype(self):
num_results = 5
initial_step_sizes = [1e-5, 1e-4]
initial_state = [0., 0.]
# Non-float32 dtype.
dtype = np.float64
step_size = [
tf.constant(initial_step_size, dtype=dtype, name='step_size')
for initial_step_size in initial_step_sizes]
def target_log_prob_fn(x1, x2):
return tf.reduce_sum(
tfd.Normal(dtype(0), dtype(1)).log_prob([x1, x2]),
axis=-1)
_, kernel_results = tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=0,
current_state=[dtype(x) for x in initial_state],
kernel=tfp.mcmc.SimpleStepSizeAdaptation(
tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
num_leapfrog_steps=2,
step_size=step_size,
state_gradients_are_stopped=True,
seed=_set_seed(252)),
num_adaptation_steps=2),
parallel_iterations=1)
step_size_ = self.evaluate(kernel_results.new_step_size)
# We apply the same adjustment to each step size in the list, so
# the starting ratio of step sizes should match the final ratio.
self.assertNear(step_size_[0][0]/step_size_[1][0],
step_size_[0][-1]/step_size_[1][-1], err=1e-4)
def test_finite_adaptation(self):
# Test that the adaptation runs for the specified number of steps.
# We set up a chain with a tiny initial step size, so every step accepts,
# and test that the final step size is incremented exactly
# `num_adaptation_steps` times.
num_results = 10
num_adaptation_steps = 3
initial_step_size = 1e-5
_, kernel_results = tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=0,
current_state=tf.constant(0.),
kernel=tfp.mcmc.SimpleStepSizeAdaptation(
tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=lambda x: tfd.Normal(0., 1.).log_prob(x),
num_leapfrog_steps=2,
step_size=initial_step_size,
state_gradients_are_stopped=True,
seed=_set_seed(252)),
num_adaptation_steps=num_adaptation_steps,
adaptation_rate=1.),
parallel_iterations=1)
init_op = tf1.global_variables_initializer()
self.evaluate(init_op)
[_, step_size_] = self.evaluate([
kernel_results, kernel_results.new_step_size])
# Test that we've incremented the step size every time. This verifies
# that adaptation ran on each of the first `num_adaptation_steps` steps.
self.assertNear(initial_step_size * 2**num_adaptation_steps,
step_size_[num_adaptation_steps], err=1e-6)
# Test that the step size does not change after the first
# `num_adaptation_steps` steps.
self.assertEqual(step_size_[num_adaptation_steps:].min(),
step_size_[num_adaptation_steps:].max())
@test_util.test_all_tf_execution_regimes
class HMCEMAdaptiveStepSize(test_util.TestCase):
"""This test verifies that the docstring example works as advertised."""
def setUp(self):
super(HMCEMAdaptiveStepSize, self).setUp()
tf.random.set_seed(10014)
np.random.seed(10014)
def make_training_data(self, num_samples, dims, sigma):
dt = np.asarray(sigma).dtype
x = np.random.randn(dims, num_samples).astype(dt)
w = sigma * np.random.randn(1, dims).astype(dt)
noise = np.random.randn(num_samples).astype(dt)
y = w.dot(x) + noise
return y[0], x, w[0]
def make_weights_prior(self, dims, sigma):
return tfd.MultivariateNormalDiag(
loc=tf.zeros([dims], dtype=sigma.dtype),
scale_identity_multiplier=sigma)
def make_response_likelihood(self, w, x):
if tensorshape_util.rank(w.shape) == 1:
y_bar = tf.matmul(w[tf.newaxis], x)[0]
else:
y_bar = tf.matmul(w, x)
return tfd.Normal(loc=y_bar, scale=tf.ones_like(y_bar)) # [n]
def test_mcem_converges(self):
# Setup assumptions.
dtype = np.float32
num_samples = 500
dims = 10
weights_prior_true_scale = np.array(0.3, dtype)
y, x, w0 = self.make_training_data(num_samples, dims,
weights_prior_true_scale)
tf1.logging.vlog(1, 'w0: %s', w0)
sigma = tfp.util.TransformedVariable(
name='sigma', initial_value=np.array(1, dtype), bijector=tfb.Exp())
optimizer = tf.optimizers.SGD(learning_rate=0.01)
# TODO(b/144045420): eliminate the need for this tf.function decorator. The
# reason it was added was that the test code is written to work in both
# eager and graph modes, and in order to do so, calls this function
# repeatedly in an optimization loop. In graph mode, that results in the
# graph growing during optimization which results in runtime quadratic in
# number of optimization steps. Decorating with tf.function means the graph
# doesn't grow, but is hacky. Instead, we should ensure the code is written
# performantly in eager and graph modes, potentially by forking the
# implementation based on tf.executing_eagerly().
@tf.function(input_signature=[
tf.TensorSpec(shape=[dims], dtype=tf.float32),
tf.TensorSpec(shape=[], dtype=tf.float32),
])
def mcem_iter(weights_chain_start, step_size):
prior = self.make_weights_prior(dims, sigma)
def unnormalized_posterior_log_prob(w):
likelihood = self.make_response_likelihood(w, x)
return (prior.log_prob(w) +
tf.reduce_sum(likelihood.log_prob(y), axis=-1)) # [m]
def trace_fn(_, pkr):
return (pkr.inner_results.log_accept_ratio,
pkr.inner_results.accepted_results.step_size)
num_results = 2
weights, (log_accept_ratio, step_size) = tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=0,
current_state=weights_chain_start,
kernel=tfp.mcmc.SimpleStepSizeAdaptation(
tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_posterior_log_prob,
num_leapfrog_steps=2,
step_size=step_size,
state_gradients_are_stopped=True,
),
# Adapt for the entirety of the trajectory.
num_adaptation_steps=2),
trace_fn=trace_fn,
parallel_iterations=1)
# We do an optimization step to propagate `sigma` after two HMC
# steps to propagate `weights`.
with tf.GradientTape() as tape:
loss = -tf.reduce_mean(unnormalized_posterior_log_prob(weights))
avg_acceptance_ratio = tf.math.exp(tfp.math.reduce_logmeanexp(
tf.minimum(log_accept_ratio, 0.)))
optimizer.apply_gradients([[
tape.gradient(loss, sigma.pretransformed_input),
sigma.pretransformed_input
]])
weights_prior_estimated_scale = tf.identity(sigma)
return (weights_prior_estimated_scale, weights[-1], loss,
step_size[-1], avg_acceptance_ratio)
if not tf.executing_eagerly():
# To create the variables.
mcem_iter(np.zeros(dims, dtype), 0.)
self.evaluate(tf1.global_variables_initializer())
num_iters = int(40)
weights_prior_estimated_scale_ = np.zeros(num_iters, dtype)
weights_ = np.zeros([num_iters + 1, dims], dtype)
loss_ = np.zeros([num_iters], dtype)
weights_[0] = np.random.randn(dims).astype(dtype)
step_size_ = 0.03
for iter_ in range(num_iters):
[
weights_prior_estimated_scale_[iter_],
weights_[iter_ + 1],
loss_[iter_],
step_size_,
avg_acceptance_ratio_,
] = self.evaluate(mcem_iter(weights_[iter_], step_size_))
# Enable using bazel flags:
# `--test_arg="--logtostderr" --test_arg="--vmodule=hmc_test=2"`,
# E.g.,
# bazel test --test_output=streamed -c opt :hmc_test \
# --test_filter=HMCEMAdaptiveStepSize \
# --test_arg="--logtostderr" --test_arg="--vmodule=hmc_test=2"
tf1.logging.vlog(
1, ('iter:{:>2} loss:{: 9.3f} scale:{:.3f} '
'step_size:{:.4f} avg_acceptance_ratio:{:.4f}').format(
iter_, loss_[iter_], weights_prior_estimated_scale_[iter_],
step_size_, avg_acceptance_ratio_))
# Loss had better decrease....
self.assertGreater(loss_[:10].mean(), loss_[-10:].mean())
self.assertNear(0.22, # Actually smaller than weights_prior_true_scale,
weights_prior_estimated_scale_[-5:].mean(),
err=0.022)
def test_step_size_adapts(self):
dtype = np.float32
def unnormalized_log_prob(x):
return -x - x**2
def trace_fn(_, pkr):
return [pkr.inner_results.is_accepted,
pkr.inner_results.accepted_results.step_size]
num_results = 1000
num_burnin_steps = 100
_, [is_accepted, step_size] = tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=tf.zeros([], dtype),
kernel=tfp.mcmc.SimpleStepSizeAdaptation(
tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_log_prob,
num_leapfrog_steps=2,
step_size=0.05,
seed=_set_seed(252)),
num_adaptation_steps=num_results + num_burnin_steps),
trace_fn=trace_fn,
parallel_iterations=1)
is_accepted_, step_size_ = self.evaluate([is_accepted, step_size])
# The important thing is that the new step_size does not equal the original,
# 0.05. However, we're not using `self.assertNotEqual` because testing for
# `1.25` reveals just how much the step_size has changed.
self.assertNear(1.25, step_size_[-100:].mean(), err=0.07)
self.assertNear(0., step_size_[-100:].std(), err=0.04)
# Anything in [0.6, 0.9] is sufficient. https://arxiv.org/abs/1411.6669
self.assertNear(0.75, is_accepted_.mean(), err=0.07)
if __name__ == '__main__':
tf.test.main()
|
the-stack_106_15684
|
#%% # Add to path
from pathlib import Path
import sys,os
this_path = str(Path().absolute())+"/"
print("File Path:", this_path)
sys.path.append(os.path.join(Path().absolute(), "src"))
# %%
# LIBRARIES
from data_loader.dataset_videos import DatasetHeadMovIMT
from data_loader import utils
import pandas as pd
# %%
"""
Determine whether the script is executed via Snakemake (sets `with_sm`).
"""
with_sm = False
try:
val = snakemake.input[0]
with_sm = True
print('Running with SnakeMake')
except NameError as e:
print('Running without snakemake')
with_sm = False
"""
INPUTS:
Setup paths either from snakemake or hardcoded relative paths
"""
prefix_filepath = "./" # Compressed dataset
dataset_path = snakemake.input[0] if with_sm else prefix_filepath+"dataset/dataset.tar.gz"
"""
OUTPUTS:
Setup paths either from snakemake or hardcoded relative paths
"""
# Path of JSON dictionary used to store the data per user
dict_json_name = snakemake.output[0] if with_sm else prefix_filepath+"temp/files_index_per_user.json"
# Filename of the file containing demographics and HMD movements data
movement_data_filename= snakemake.output[1] if with_sm else prefix_filepath+"temp/hmd_movements.pickle"
general_data_filename = snakemake.output[2] if with_sm else prefix_filepath+"dataset/demographics_IMT.csv"
# %%
""" #################
######### MAIN SCRIPT
################# """
"""
Extracts the whole dataset from the .tar file.
The resulting HMD-movement data is written to `movement_data_filename` and the
demographics table to `general_data_filename`.
"""
# Dataset container
data = DatasetHeadMovIMT(dataset_path,dict_json_name)
# Create JSON with dictionary of structured data
data.generate_file_index()
files_index = utils.load_json(dict_json_name)
print("Number of users in file index:", len(files_index.keys()))
# Indices with corrupted samples
omit_users = [14, 33, 52, 61, 62]
# Transform the paths in the compressed file into bytes
res = data.uncompress_data(files_index,
#debug_users = 15, # Load just a user index
# Users ID with empty data
list_unprocessed_users = omit_users
)
if res==0:
# Save CSV with dataframe of general data
data.general.to_csv(general_data_filename)
utils.create_pickle(data.movement, movement_data_filename)
else:
print("There was an error uncompressing the data")
print("End")
|
the-stack_106_15686
|
from django.utils.translation import npgettext, pgettext_lazy
from django_filters import (
CharFilter, ChoiceFilter, OrderingFilter, RangeFilter)
from ...core.filters import SortedFilterSet
from ...core.i18n import COUNTRY_CODE_CHOICES
from ...shipping.models import ShippingMethod
SORT_BY_FIELDS = {
'name': pgettext_lazy('Group list sorting option', 'name')}
class ShippingMethodFilter(SortedFilterSet):
name = CharFilter(
label=pgettext_lazy(
'Shipping method list filter label', 'Name'),
lookup_expr="icontains")
price = RangeFilter(
label=pgettext_lazy(
'Shipping method list filter label', 'Price range'),
name='price_per_country__price')
country = ChoiceFilter(
label=pgettext_lazy('Shipping method filter label', 'Country'),
name='price_per_country__country_code',
choices=COUNTRY_CODE_CHOICES)
sort_by = OrderingFilter(
label=pgettext_lazy('Product list sorting filter label', 'Sort by'),
fields=SORT_BY_FIELDS.keys(),
field_labels=SORT_BY_FIELDS)
class Meta:
model = ShippingMethod
fields = []
def get_summary_message(self):
counter = self.qs.count()
return npgettext(
'Number of matching records in the dashboard '
'shipping methods list',
'Found %(counter)d matching shipping method',
'Found %(counter)d matching shipping methods',
number=counter) % {'counter': counter}
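# Minimal usage sketch (hypothetical dashboard view code, not part of this module):
#
#   filter_set = ShippingMethodFilter(request.GET, queryset=ShippingMethod.objects.all())
#   shipping_methods = filter_set.qs
#   summary = filter_set.get_summary_message()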
|
the-stack_106_15688
|
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
""" Check that there is a playbook to run all role tests with both providers
"""
# vim: fileencoding=utf8
import glob
import os
import sys
GET_NM_VERSION = """
- block:
- name: Install NetworkManager
package:
name: NetworkManager
state: present
- name: Get NetworkManager version
command: rpm -q --qf "%{version}" NetworkManager
args:
warn: false
register: NetworkManager_version
when: true
when:
- ansible_distribution_major_version != '6'
"""
MINIMUM_NM_VERSION_CHECK = """
- NetworkManager_version.stdout is version({minimum_nm_version}, '>=')
"""
RUN_PLAYBOOK_WITH_NM = """# SPDX-License-Identifier: BSD-3-Clause
# This file was generated by ensure_provider_tests.py
---
# set network provider and gather facts
- hosts: all
name: Run playbook '{test_playbook}' with nm as provider
tasks:
- name: Set network provider to 'nm'
set_fact:
network_provider: nm
{get_nm_version}
# workaround for: https://github.com/ansible/ansible/issues/27973
# There is no way in Ansible to abort a playbook hosts with specific OS
# releases Therefore we include the playbook with the tests only if the hosts
# would support it.
# The test requires or should run with NetworkManager, therefore it cannot run
# on RHEL/CentOS 6
- import_playbook: {test_playbook}
when:
- ansible_distribution_major_version != '6'
{minimum_nm_version_check}"""
MINIMUM_VERSION = "minimum_version"
NM_ONLY_TESTS = {
"playbooks/tests_ethtool_features.yml": {
MINIMUM_VERSION: "'1.20.0'",
"comment": "# NetworkManager 1.20.0 introduced ethtool settings support",
},
"playbooks/tests_reapply.yml": {},
"playbooks/tests_states.yml": {},
"playbooks/tests_802_1x.yml": {},
}
IGNORE = [
# checked by tests_regression_nm.yml
"playbooks/tests_checkpoint_cleanup.yml",
]
RUN_PLAYBOOK_WITH_INITSCRIPTS = """# SPDX-License-Identifier: BSD-3-Clause
# This file was generated by ensure_provider_tests.py
---
- hosts: all
name: Run playbook '{test_playbook}' with initscripts as provider
tasks:
- name: Set network provider to 'initscripts'
set_fact:
network_provider: initscripts
- import_playbook: {test_playbook}
"""
def create_nm_playbook(test_playbook):
fileroot = os.path.splitext(os.path.basename(test_playbook))[0]
nm_testfile = fileroot + "_nm.yml"
minimum_nm_version = NM_ONLY_TESTS.get(test_playbook, {}).get(MINIMUM_VERSION)
nm_version_check = ""
if minimum_nm_version:
nm_version_check = MINIMUM_NM_VERSION_CHECK.format(
minimum_nm_version=minimum_nm_version
)
nominal_nm_testfile_data = RUN_PLAYBOOK_WITH_NM.format(
test_playbook=test_playbook,
get_nm_version=minimum_nm_version and GET_NM_VERSION or "",
minimum_nm_version_check=nm_version_check,
)
return nm_testfile, nominal_nm_testfile_data
def create_initscripts_playbook(test_playbook):
fileroot = os.path.splitext(os.path.basename(test_playbook))[0]
init_testfile = fileroot + "_initscripts.yml"
nominal_data = RUN_PLAYBOOK_WITH_INITSCRIPTS.format(test_playbook=test_playbook)
return init_testfile, nominal_data
def check_playbook(generate, testfile, test_playbook, nominal_data):
is_missing = False
returncode = None
if generate:
print(testfile)
with open(testfile, "w") as ofile:
ofile.write(nominal_data)
if not os.path.isfile(testfile) and not generate:
is_missing = True
else:
with open(testfile) as ifile:
testdata = ifile.read()
if testdata != nominal_data:
print(f"ERROR: Playbook does not match nominal value: {testfile}")
returncode = 1
return is_missing, returncode
def main():
testsfiles = glob.glob("playbooks/tests_*.yml")
missing = []
returncode = 0
# Generate files when specified
generate = bool(len(sys.argv) > 1 and sys.argv[1] == "generate")
if not testsfiles:
print("ERROR: No tests found")
returncode = 1
for test_playbook in testsfiles:
if test_playbook in IGNORE:
continue
nm_testfile, nominal_nm_testfile_data = create_nm_playbook(test_playbook)
is_missing, new_returncode = check_playbook(
generate=generate,
testfile=nm_testfile,
test_playbook=test_playbook,
nominal_data=nominal_nm_testfile_data,
)
if is_missing:
missing.append(test_playbook)
if new_returncode:
returncode = new_returncode
if test_playbook not in NM_ONLY_TESTS:
init_testfile, nominal_init_testfile_data = create_initscripts_playbook(
test_playbook
)
is_missing, new_returncode = check_playbook(
generate=generate,
testfile=init_testfile,
test_playbook=test_playbook,
nominal_data=nominal_init_testfile_data,
)
if is_missing:
missing.append(test_playbook)
if new_returncode:
returncode = new_returncode
if missing:
print("ERROR: No NM or initscripts tests found for:\n" + ", \n".join(missing))
print("Try to generate them with '{} generate'".format(sys.argv[0]))
returncode = 1
return returncode
if __name__ == "__main__":
sys.exit(main())
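# Usage sketch, based on the behaviour implemented above:
#   ./ensure_provider_tests.py            # verify the *_nm.yml / *_initscripts.yml wrappers match
#   ./ensure_provider_tests.py generate   # (re)generate the wrapper playbooks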
|
the-stack_106_15689
|
# bca4abm
# See full license in LICENSE.txt.
import logging
import os
import pandas as pd
import numpy as np
import openmatrix as omx
from activitysim.core import config
from activitysim.core import inject
from activitysim.core import tracing
from activitysim.core import assign
from bca4abm import bca4abm as bca
from ..util.misc import missing_columns
from ..util.misc import add_summary_results
from ..util.misc import add_aggregate_results
logger = logging.getLogger(__name__)
"""
Link processor
"""
def read_link_manifest(data_dir, model_settings):
fname = os.path.join(data_dir, 'link_data_manifest.csv')
# strings that might be empty and hence misconstrued as nans
converters = {
# 'toll_file_name': str,
# 'toll_units': str,
}
manifest = pd.read_csv(fname, header=0, comment='#', converters=converters)
assert 'description' in manifest.columns
assert 'link_file_name' in manifest.columns
return manifest
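# Illustrative link_data_manifest.csv layout consumed above (hypothetical values;
# only the 'description' and 'link_file_name' columns are required):
#
#   description,link_file_name
#   AM peak highway links,link_am.csv
#   Daily highway links,link_daily.csv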
def read_csv_file(data_dir, file_name, column_map=None):
fpath = os.path.join(data_dir, file_name)
if column_map:
usecols = list(column_map.keys())
# print "read_bca_table usecols: ", usecols
# FIXME - should we allow comment lines?
df = bca.read_csv_or_tsv(fpath, header=0, usecols=usecols)
df.rename(columns=column_map, inplace=True)
else:
df = bca.read_csv_or_tsv(fpath, header=0, comment='#')
return df
@inject.injectable()
def link_spec():
return bca.read_assignment_spec('link.csv')
@inject.injectable()
def link_daily_spec():
return bca.read_assignment_spec('link_daily.csv')
def add_tables_to_locals(data_dir, model_settings, locals_dict):
tables_tag = "TABLES"
if tables_tag in model_settings:
file_list = model_settings.get(tables_tag)
for var_name, filename in list(file_list.items()):
# print "add_tables_to_locals %s = %s" % (var_name, filename)
fpath = os.path.join(data_dir, filename)
df = bca.read_csv_or_tsv(fpath, header=0, comment='#')
locals_dict[var_name] = df
return locals_dict
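# Illustrative TABLES entry in the model settings consumed above (hypothetical
# variable names and file names):
#
#   TABLES:
#     coc_zones: cocs.csv
#     external_stations: externals.csv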
def eval_link_spec(link_spec, link_file_names, data_dir,
link_file_column_map, link_index_fields,
model_settings, trace_tag=None, trace_od=None):
# accept a single string as well as a dict of {suffix: filename}
if isinstance(link_file_names, str):
link_file_names = {"": link_file_names}
locals_dict = config.get_model_constants(model_settings)
locals_dict.update(config.setting('globals'))
locals_dict = add_tables_to_locals(data_dir, model_settings, locals_dict)
results = {}
for scenario in ['base', 'build']:
logger.debug("eval_link_spec scenario %s" % scenario)
link_data_subdir = 'base-data' if scenario == 'base' else 'build-data'
df_list = []
for suffix, link_file_name in list(link_file_names.items()):
df = read_csv_file(data_dir=os.path.join(data_dir, link_data_subdir),
file_name=link_file_name,
column_map=link_file_column_map)
if link_index_fields:
df.set_index(link_index_fields, drop=True, inplace=True)
if suffix:
df = df.add_suffix("_" + suffix)
df_list.append(df)
links_df = pd.concat(df_list, axis=1)
# copy index fields into columns
if link_index_fields:
links_df = links_df.reset_index().set_index(link_index_fields, drop=False)
if trace_od:
od_column = model_settings.get('od_column', None)
if od_column:
o, d = trace_od
trace_rows = (links_df[od_column] == o) | (links_df[od_column] == d)
else:
# just dump first row
trace_rows = (links_df.index == 1)
else:
trace_rows = None
summary, trace_results, trace_assigned_locals = \
bca.eval_and_sum(link_spec,
links_df,
locals_dict,
df_alias='links',
trace_rows=trace_rows)
results[scenario] = summary
if trace_results is not None:
# FIXME: manually setting df.index.name to prevent
# activitysim.tracing.write_df_csv() from attempting to reset the index.
# write_df_csv() should be able to handle a multi-index dataframe.
trace_results.index.name = trace_results.index.names[0]
tracing.write_csv(trace_results,
file_name="%s_results_%s" % (trace_tag, scenario),
index_label='index',
column_labels=['label', 'link'])
if trace_assigned_locals:
tracing.write_csv(trace_assigned_locals,
file_name="%s_locals_%s" % (trace_tag, scenario))
results = results['build'] - results['base']
results.reset_index(drop=True, inplace=True)
return results
@inject.step()
def link_processor(link_spec, data_dir):
trace_label = 'link'
model_settings = config.read_model_settings('link.yaml')
link_manifest = read_link_manifest(data_dir, model_settings)
results = None
for row in link_manifest.itertuples(index=True):
link_index_fields = None
row_results = eval_link_spec(link_spec,
row.link_file_name,
data_dir,
model_settings.get('link_table_column_map', None),
link_index_fields,
model_settings=model_settings)
assigned_column_names = row_results.columns.values
row_results.insert(loc=0, column='description', value=row.description)
row_results.insert(loc=0, column='manifest_idx', value=row.Index)
if results is None:
results = row_results
else:
            # DataFrame.append was removed in pandas 2.x; concatenate instead
            results = pd.concat([results, row_results], ignore_index=True)
results.reset_index(inplace=True)
add_summary_results(results, summary_column_names=assigned_column_names,
prefix='L_', spec=link_spec)
@inject.step()
def link_daily_processor(link_daily_spec, data_dir, trace_od):
trace_label = 'link_daily'
model_settings = config.read_model_settings('link_daily.yaml')
if 'link_daily_file_names' in model_settings:
link_daily_file_names = model_settings['link_daily_file_names']
elif 'link_daily_file_name' in model_settings:
link_daily_file_names = model_settings['link_daily_file_name']
else:
raise RuntimeError("no link_daily_file_names specified in model_settings file")
results = eval_link_spec(link_daily_spec,
link_daily_file_names,
data_dir,
model_settings.get('link_daily_table_column_map', None),
model_settings.get('link_daily_index_fields', None),
model_settings=model_settings,
trace_tag=trace_label,
trace_od=trace_od)
if 'silos' in link_daily_spec.columns:
add_aggregate_results(results, link_daily_spec, source=trace_label, zonal=False)
else:
add_summary_results(results, prefix='LD_', spec=link_daily_spec)
|
the-stack_106_15690
|
from typing import Dict, List, Union
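# Illustrative (hypothetical) input for process(): 'project' names the working folder and
# SQLite results file, and 'hit_blocks' maps hit names to MDL molblock strings, e.g.
#
#   data = {
#       "project": "mpro_combinations",
#       "hit_blocks": {"hit_A": open("hit_A.mol").read(),
#                      "hit_B": open("hit_B.mol").read()},
#   }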
def process(data: Dict[str, Union[str, dict]]):
# read data
project = data['project']
db_name = f'{project}.sqlite'
hit_blocks = data['hit_blocks']
name = '-'.join(hit_blocks.keys())
print('**********', name)
# imports ------------------------------------------------------
import pyrosetta, logging
from .make_pyrosetta_options import make_option_string
pyrosetta.distributed.maybe_init(extra_options=make_option_string(no_optH=False,
mute='all',
ignore_unrecognized_res=True,
load_PDB_components=False))
from fragmenstein.mpro import MProVictor
from sqlitedict import SqliteDict
import json, logging
# fix hits ------------------------------------------------------
from rdkit import Chem
hits = []
for hit_name in hit_blocks:
hit = Chem.MolFromMolBlock(hit_blocks[hit_name])
hit.SetProp('_Name', hit_name)
hits.append(hit)
# settings for Fragmenstein ------------------------------------
MProVictor.work_path = f'{project}' # db_name
MProVictor.monster_throw_on_discard = True
MProVictor.joining_cutoff = 5 # 10
MProVictor.quick_renanimation = False
MProVictor.error_to_catch = Exception
MProVictor.enable_stdout(logging.ERROR)
MProVictor.enable_logfile(f'{project}.log', logging.INFO)
MProVictor.capture_rdkit_log()
# analyse ------------------------------------------------------
try:
v = MProVictor.combine(hits=hits)
results = SqliteDict(db_name, encode=json.dumps, decode=json.loads, autocommit=True)
results[v.long_name] = v.summarise()
if not v.error_msg:
v.make_pse()
print('DONE', [hit.GetProp('_Name') for hit in hits])
return v.minimised_mol
    except ConnectionError:
        # ConnectionError must be caught before Exception (its superclass), otherwise this clause is unreachable
        pass
    except Exception as error:
        error_msg = f'{error.__class__.__name__} {error}'
        results = SqliteDict(db_name, encode=json.dumps, decode=json.loads, autocommit=True)
        results[name] = {'error': error_msg}
        MProVictor.journal.critical(f'*** {error_msg}, data: {data}')
return None
|
the-stack_106_15691
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class DeploymentSummary(object):
"""
Summary of the Deployment.
"""
#: A constant which can be used with the lifecycle_state property of a DeploymentSummary.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
#: A constant which can be used with the lifecycle_state property of a DeploymentSummary.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
#: A constant which can be used with the lifecycle_state property of a DeploymentSummary.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a DeploymentSummary.
#: This constant has a value of "INACTIVE"
LIFECYCLE_STATE_INACTIVE = "INACTIVE"
#: A constant which can be used with the lifecycle_state property of a DeploymentSummary.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a DeploymentSummary.
#: This constant has a value of "DELETED"
LIFECYCLE_STATE_DELETED = "DELETED"
#: A constant which can be used with the lifecycle_state property of a DeploymentSummary.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
#: A constant which can be used with the lifecycle_state property of a DeploymentSummary.
#: This constant has a value of "NEEDS_ATTENTION"
LIFECYCLE_STATE_NEEDS_ATTENTION = "NEEDS_ATTENTION"
#: A constant which can be used with the lifecycle_state property of a DeploymentSummary.
#: This constant has a value of "IN_PROGRESS"
LIFECYCLE_STATE_IN_PROGRESS = "IN_PROGRESS"
#: A constant which can be used with the lifecycle_state property of a DeploymentSummary.
#: This constant has a value of "CANCELING"
LIFECYCLE_STATE_CANCELING = "CANCELING"
#: A constant which can be used with the lifecycle_state property of a DeploymentSummary.
#: This constant has a value of "CANCELED"
LIFECYCLE_STATE_CANCELED = "CANCELED"
#: A constant which can be used with the lifecycle_state property of a DeploymentSummary.
#: This constant has a value of "SUCCEEDED"
LIFECYCLE_STATE_SUCCEEDED = "SUCCEEDED"
#: A constant which can be used with the lifecycle_sub_state property of a DeploymentSummary.
#: This constant has a value of "RECOVERING"
LIFECYCLE_SUB_STATE_RECOVERING = "RECOVERING"
#: A constant which can be used with the lifecycle_sub_state property of a DeploymentSummary.
#: This constant has a value of "STARTING"
LIFECYCLE_SUB_STATE_STARTING = "STARTING"
#: A constant which can be used with the lifecycle_sub_state property of a DeploymentSummary.
#: This constant has a value of "STOPPING"
LIFECYCLE_SUB_STATE_STOPPING = "STOPPING"
#: A constant which can be used with the lifecycle_sub_state property of a DeploymentSummary.
#: This constant has a value of "MOVING"
LIFECYCLE_SUB_STATE_MOVING = "MOVING"
#: A constant which can be used with the lifecycle_sub_state property of a DeploymentSummary.
#: This constant has a value of "UPGRADING"
LIFECYCLE_SUB_STATE_UPGRADING = "UPGRADING"
#: A constant which can be used with the lifecycle_sub_state property of a DeploymentSummary.
#: This constant has a value of "RESTORING"
LIFECYCLE_SUB_STATE_RESTORING = "RESTORING"
#: A constant which can be used with the lifecycle_sub_state property of a DeploymentSummary.
#: This constant has a value of "BACKUP_IN_PROGRESS"
LIFECYCLE_SUB_STATE_BACKUP_IN_PROGRESS = "BACKUP_IN_PROGRESS"
#: A constant which can be used with the license_model property of a DeploymentSummary.
#: This constant has a value of "LICENSE_INCLUDED"
LICENSE_MODEL_LICENSE_INCLUDED = "LICENSE_INCLUDED"
#: A constant which can be used with the license_model property of a DeploymentSummary.
#: This constant has a value of "BRING_YOUR_OWN_LICENSE"
LICENSE_MODEL_BRING_YOUR_OWN_LICENSE = "BRING_YOUR_OWN_LICENSE"
#: A constant which can be used with the deployment_type property of a DeploymentSummary.
#: This constant has a value of "OGG"
DEPLOYMENT_TYPE_OGG = "OGG"
def __init__(self, **kwargs):
"""
Initializes a new DeploymentSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this DeploymentSummary.
:type id: str
:param display_name:
The value to assign to the display_name property of this DeploymentSummary.
:type display_name: str
:param description:
The value to assign to the description property of this DeploymentSummary.
:type description: str
:param compartment_id:
The value to assign to the compartment_id property of this DeploymentSummary.
:type compartment_id: str
:param time_created:
The value to assign to the time_created property of this DeploymentSummary.
:type time_created: datetime
:param time_updated:
The value to assign to the time_updated property of this DeploymentSummary.
:type time_updated: datetime
:param lifecycle_state:
The value to assign to the lifecycle_state property of this DeploymentSummary.
Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", "FAILED", "NEEDS_ATTENTION", "IN_PROGRESS", "CANCELING", "CANCELED", "SUCCEEDED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param lifecycle_sub_state:
The value to assign to the lifecycle_sub_state property of this DeploymentSummary.
Allowed values for this property are: "RECOVERING", "STARTING", "STOPPING", "MOVING", "UPGRADING", "RESTORING", "BACKUP_IN_PROGRESS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_sub_state: str
:param lifecycle_details:
The value to assign to the lifecycle_details property of this DeploymentSummary.
:type lifecycle_details: str
:param freeform_tags:
The value to assign to the freeform_tags property of this DeploymentSummary.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this DeploymentSummary.
:type defined_tags: dict(str, dict(str, object))
:param subnet_id:
The value to assign to the subnet_id property of this DeploymentSummary.
:type subnet_id: str
:param license_model:
The value to assign to the license_model property of this DeploymentSummary.
Allowed values for this property are: "LICENSE_INCLUDED", "BRING_YOUR_OWN_LICENSE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type license_model: str
:param fqdn:
The value to assign to the fqdn property of this DeploymentSummary.
:type fqdn: str
:param cpu_core_count:
The value to assign to the cpu_core_count property of this DeploymentSummary.
:type cpu_core_count: int
:param is_auto_scaling_enabled:
The value to assign to the is_auto_scaling_enabled property of this DeploymentSummary.
:type is_auto_scaling_enabled: bool
:param is_public:
The value to assign to the is_public property of this DeploymentSummary.
:type is_public: bool
:param public_ip_address:
The value to assign to the public_ip_address property of this DeploymentSummary.
:type public_ip_address: str
:param private_ip_address:
The value to assign to the private_ip_address property of this DeploymentSummary.
:type private_ip_address: str
:param deployment_url:
The value to assign to the deployment_url property of this DeploymentSummary.
:type deployment_url: str
:param system_tags:
The value to assign to the system_tags property of this DeploymentSummary.
:type system_tags: dict(str, dict(str, object))
:param is_latest_version:
The value to assign to the is_latest_version property of this DeploymentSummary.
:type is_latest_version: bool
:param time_upgrade_required:
The value to assign to the time_upgrade_required property of this DeploymentSummary.
:type time_upgrade_required: datetime
:param deployment_type:
The value to assign to the deployment_type property of this DeploymentSummary.
Allowed values for this property are: "OGG", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type deployment_type: str
"""
self.swagger_types = {
'id': 'str',
'display_name': 'str',
'description': 'str',
'compartment_id': 'str',
'time_created': 'datetime',
'time_updated': 'datetime',
'lifecycle_state': 'str',
'lifecycle_sub_state': 'str',
'lifecycle_details': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))',
'subnet_id': 'str',
'license_model': 'str',
'fqdn': 'str',
'cpu_core_count': 'int',
'is_auto_scaling_enabled': 'bool',
'is_public': 'bool',
'public_ip_address': 'str',
'private_ip_address': 'str',
'deployment_url': 'str',
'system_tags': 'dict(str, dict(str, object))',
'is_latest_version': 'bool',
'time_upgrade_required': 'datetime',
'deployment_type': 'str'
}
self.attribute_map = {
'id': 'id',
'display_name': 'displayName',
'description': 'description',
'compartment_id': 'compartmentId',
'time_created': 'timeCreated',
'time_updated': 'timeUpdated',
'lifecycle_state': 'lifecycleState',
'lifecycle_sub_state': 'lifecycleSubState',
'lifecycle_details': 'lifecycleDetails',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags',
'subnet_id': 'subnetId',
'license_model': 'licenseModel',
'fqdn': 'fqdn',
'cpu_core_count': 'cpuCoreCount',
'is_auto_scaling_enabled': 'isAutoScalingEnabled',
'is_public': 'isPublic',
'public_ip_address': 'publicIpAddress',
'private_ip_address': 'privateIpAddress',
'deployment_url': 'deploymentUrl',
'system_tags': 'systemTags',
'is_latest_version': 'isLatestVersion',
'time_upgrade_required': 'timeUpgradeRequired',
'deployment_type': 'deploymentType'
}
self._id = None
self._display_name = None
self._description = None
self._compartment_id = None
self._time_created = None
self._time_updated = None
self._lifecycle_state = None
self._lifecycle_sub_state = None
self._lifecycle_details = None
self._freeform_tags = None
self._defined_tags = None
self._subnet_id = None
self._license_model = None
self._fqdn = None
self._cpu_core_count = None
self._is_auto_scaling_enabled = None
self._is_public = None
self._public_ip_address = None
self._private_ip_address = None
self._deployment_url = None
self._system_tags = None
self._is_latest_version = None
self._time_upgrade_required = None
self._deployment_type = None
@property
def id(self):
"""
**[Required]** Gets the id of this DeploymentSummary.
The `OCID`__ of the deployment being referenced.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The id of this DeploymentSummary.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this DeploymentSummary.
The `OCID`__ of the deployment being referenced.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param id: The id of this DeploymentSummary.
:type: str
"""
self._id = id
@property
def display_name(self):
"""
Gets the display_name of this DeploymentSummary.
An object's Display Name.
:return: The display_name of this DeploymentSummary.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this DeploymentSummary.
An object's Display Name.
:param display_name: The display_name of this DeploymentSummary.
:type: str
"""
self._display_name = display_name
@property
def description(self):
"""
Gets the description of this DeploymentSummary.
Metadata about this specific object.
:return: The description of this DeploymentSummary.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this DeploymentSummary.
Metadata about this specific object.
:param description: The description of this DeploymentSummary.
:type: str
"""
self._description = description
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this DeploymentSummary.
The `OCID`__ of the compartment being referenced.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this DeploymentSummary.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this DeploymentSummary.
The `OCID`__ of the compartment being referenced.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this DeploymentSummary.
:type: str
"""
self._compartment_id = compartment_id
@property
def time_created(self):
"""
Gets the time_created of this DeploymentSummary.
The time the resource was created. The format is defined by `RFC3339`__, such as `2016-08-25T21:10:29.600Z`.
__ https://tools.ietf.org/html/rfc3339
:return: The time_created of this DeploymentSummary.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this DeploymentSummary.
The time the resource was created. The format is defined by `RFC3339`__, such as `2016-08-25T21:10:29.600Z`.
__ https://tools.ietf.org/html/rfc3339
:param time_created: The time_created of this DeploymentSummary.
:type: datetime
"""
self._time_created = time_created
@property
def time_updated(self):
"""
Gets the time_updated of this DeploymentSummary.
The time the resource was last updated. The format is defined by `RFC3339`__, such as `2016-08-25T21:10:29.600Z`.
__ https://tools.ietf.org/html/rfc3339
:return: The time_updated of this DeploymentSummary.
:rtype: datetime
"""
return self._time_updated
@time_updated.setter
def time_updated(self, time_updated):
"""
Sets the time_updated of this DeploymentSummary.
The time the resource was last updated. The format is defined by `RFC3339`__, such as `2016-08-25T21:10:29.600Z`.
__ https://tools.ietf.org/html/rfc3339
:param time_updated: The time_updated of this DeploymentSummary.
:type: datetime
"""
self._time_updated = time_updated
@property
def lifecycle_state(self):
"""
Gets the lifecycle_state of this DeploymentSummary.
Possible lifecycle states.
Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", "FAILED", "NEEDS_ATTENTION", "IN_PROGRESS", "CANCELING", "CANCELED", "SUCCEEDED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this DeploymentSummary.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this DeploymentSummary.
Possible lifecycle states.
:param lifecycle_state: The lifecycle_state of this DeploymentSummary.
:type: str
"""
allowed_values = ["CREATING", "UPDATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", "FAILED", "NEEDS_ATTENTION", "IN_PROGRESS", "CANCELING", "CANCELED", "SUCCEEDED"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def lifecycle_sub_state(self):
"""
Gets the lifecycle_sub_state of this DeploymentSummary.
Possible GGS lifecycle sub-states.
Allowed values for this property are: "RECOVERING", "STARTING", "STOPPING", "MOVING", "UPGRADING", "RESTORING", "BACKUP_IN_PROGRESS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_sub_state of this DeploymentSummary.
:rtype: str
"""
return self._lifecycle_sub_state
@lifecycle_sub_state.setter
def lifecycle_sub_state(self, lifecycle_sub_state):
"""
Sets the lifecycle_sub_state of this DeploymentSummary.
Possible GGS lifecycle sub-states.
:param lifecycle_sub_state: The lifecycle_sub_state of this DeploymentSummary.
:type: str
"""
allowed_values = ["RECOVERING", "STARTING", "STOPPING", "MOVING", "UPGRADING", "RESTORING", "BACKUP_IN_PROGRESS"]
if not value_allowed_none_or_none_sentinel(lifecycle_sub_state, allowed_values):
lifecycle_sub_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_sub_state = lifecycle_sub_state
@property
def lifecycle_details(self):
"""
Gets the lifecycle_details of this DeploymentSummary.
Describes the object's current state in detail. For example, it can be used to provide actionable information for a resource in a Failed state.
:return: The lifecycle_details of this DeploymentSummary.
:rtype: str
"""
return self._lifecycle_details
@lifecycle_details.setter
def lifecycle_details(self, lifecycle_details):
"""
Sets the lifecycle_details of this DeploymentSummary.
Describes the object's current state in detail. For example, it can be used to provide actionable information for a resource in a Failed state.
:param lifecycle_details: The lifecycle_details of this DeploymentSummary.
:type: str
"""
self._lifecycle_details = lifecycle_details
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this DeploymentSummary.
A simple key-value pair that is applied without any predefined name, type, or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:return: The freeform_tags of this DeploymentSummary.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this DeploymentSummary.
A simple key-value pair that is applied without any predefined name, type, or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:param freeform_tags: The freeform_tags of this DeploymentSummary.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this DeploymentSummary.
Tags defined for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:return: The defined_tags of this DeploymentSummary.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this DeploymentSummary.
Tags defined for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:param defined_tags: The defined_tags of this DeploymentSummary.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def subnet_id(self):
"""
**[Required]** Gets the subnet_id of this DeploymentSummary.
The `OCID`__ of the subnet being referenced.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The subnet_id of this DeploymentSummary.
:rtype: str
"""
return self._subnet_id
@subnet_id.setter
def subnet_id(self, subnet_id):
"""
Sets the subnet_id of this DeploymentSummary.
The `OCID`__ of the subnet being referenced.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param subnet_id: The subnet_id of this DeploymentSummary.
:type: str
"""
self._subnet_id = subnet_id
@property
def license_model(self):
"""
**[Required]** Gets the license_model of this DeploymentSummary.
The Oracle license model that applies to a Deployment.
Allowed values for this property are: "LICENSE_INCLUDED", "BRING_YOUR_OWN_LICENSE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The license_model of this DeploymentSummary.
:rtype: str
"""
return self._license_model
@license_model.setter
def license_model(self, license_model):
"""
Sets the license_model of this DeploymentSummary.
The Oracle license model that applies to a Deployment.
:param license_model: The license_model of this DeploymentSummary.
:type: str
"""
allowed_values = ["LICENSE_INCLUDED", "BRING_YOUR_OWN_LICENSE"]
if not value_allowed_none_or_none_sentinel(license_model, allowed_values):
license_model = 'UNKNOWN_ENUM_VALUE'
self._license_model = license_model
@property
def fqdn(self):
"""
Gets the fqdn of this DeploymentSummary.
A three-label Fully Qualified Domain Name (FQDN) for a resource.
:return: The fqdn of this DeploymentSummary.
:rtype: str
"""
return self._fqdn
@fqdn.setter
def fqdn(self, fqdn):
"""
Sets the fqdn of this DeploymentSummary.
A three-label Fully Qualified Domain Name (FQDN) for a resource.
:param fqdn: The fqdn of this DeploymentSummary.
:type: str
"""
self._fqdn = fqdn
@property
def cpu_core_count(self):
"""
Gets the cpu_core_count of this DeploymentSummary.
The Minimum number of OCPUs to be made available for this Deployment.
:return: The cpu_core_count of this DeploymentSummary.
:rtype: int
"""
return self._cpu_core_count
@cpu_core_count.setter
def cpu_core_count(self, cpu_core_count):
"""
Sets the cpu_core_count of this DeploymentSummary.
The Minimum number of OCPUs to be made available for this Deployment.
:param cpu_core_count: The cpu_core_count of this DeploymentSummary.
:type: int
"""
self._cpu_core_count = cpu_core_count
@property
def is_auto_scaling_enabled(self):
"""
Gets the is_auto_scaling_enabled of this DeploymentSummary.
Indicates if auto scaling is enabled for the Deployment's CPU core count.
:return: The is_auto_scaling_enabled of this DeploymentSummary.
:rtype: bool
"""
return self._is_auto_scaling_enabled
@is_auto_scaling_enabled.setter
def is_auto_scaling_enabled(self, is_auto_scaling_enabled):
"""
Sets the is_auto_scaling_enabled of this DeploymentSummary.
Indicates if auto scaling is enabled for the Deployment's CPU core count.
:param is_auto_scaling_enabled: The is_auto_scaling_enabled of this DeploymentSummary.
:type: bool
"""
self._is_auto_scaling_enabled = is_auto_scaling_enabled
@property
def is_public(self):
"""
Gets the is_public of this DeploymentSummary.
True if this object is publicly available.
:return: The is_public of this DeploymentSummary.
:rtype: bool
"""
return self._is_public
@is_public.setter
def is_public(self, is_public):
"""
Sets the is_public of this DeploymentSummary.
True if this object is publicly available.
:param is_public: The is_public of this DeploymentSummary.
:type: bool
"""
self._is_public = is_public
@property
def public_ip_address(self):
"""
Gets the public_ip_address of this DeploymentSummary.
The public IP address representing the access point for the Deployment.
:return: The public_ip_address of this DeploymentSummary.
:rtype: str
"""
return self._public_ip_address
@public_ip_address.setter
def public_ip_address(self, public_ip_address):
"""
Sets the public_ip_address of this DeploymentSummary.
The public IP address representing the access point for the Deployment.
:param public_ip_address: The public_ip_address of this DeploymentSummary.
:type: str
"""
self._public_ip_address = public_ip_address
@property
def private_ip_address(self):
"""
Gets the private_ip_address of this DeploymentSummary.
The private IP address in the customer's VCN representing the access point for the associated endpoint service in the GoldenGate service VCN.
:return: The private_ip_address of this DeploymentSummary.
:rtype: str
"""
return self._private_ip_address
@private_ip_address.setter
def private_ip_address(self, private_ip_address):
"""
Sets the private_ip_address of this DeploymentSummary.
The private IP address in the customer's VCN representing the access point for the associated endpoint service in the GoldenGate service VCN.
:param private_ip_address: The private_ip_address of this DeploymentSummary.
:type: str
"""
self._private_ip_address = private_ip_address
@property
def deployment_url(self):
"""
Gets the deployment_url of this DeploymentSummary.
The URL of a resource.
:return: The deployment_url of this DeploymentSummary.
:rtype: str
"""
return self._deployment_url
@deployment_url.setter
def deployment_url(self, deployment_url):
"""
Sets the deployment_url of this DeploymentSummary.
The URL of a resource.
:param deployment_url: The deployment_url of this DeploymentSummary.
:type: str
"""
self._deployment_url = deployment_url
@property
def system_tags(self):
"""
Gets the system_tags of this DeploymentSummary.
The system tags associated with this resource, if any. The system tags are set by Oracle Cloud Infrastructure services. Each key is predefined and scoped to namespaces. For more information, see `Resource Tags`__.
Example: `{orcl-cloud: {free-tier-retain: true}}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:return: The system_tags of this DeploymentSummary.
:rtype: dict(str, dict(str, object))
"""
return self._system_tags
@system_tags.setter
def system_tags(self, system_tags):
"""
Sets the system_tags of this DeploymentSummary.
The system tags associated with this resource, if any. The system tags are set by Oracle Cloud Infrastructure services. Each key is predefined and scoped to namespaces. For more information, see `Resource Tags`__.
Example: `{orcl-cloud: {free-tier-retain: true}}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:param system_tags: The system_tags of this DeploymentSummary.
:type: dict(str, dict(str, object))
"""
self._system_tags = system_tags
@property
def is_latest_version(self):
"""
Gets the is_latest_version of this DeploymentSummary.
        Indicates if the resource is the latest available version.
:return: The is_latest_version of this DeploymentSummary.
:rtype: bool
"""
return self._is_latest_version
@is_latest_version.setter
def is_latest_version(self, is_latest_version):
"""
Sets the is_latest_version of this DeploymentSummary.
        Indicates if the resource is the latest available version.
:param is_latest_version: The is_latest_version of this DeploymentSummary.
:type: bool
"""
self._is_latest_version = is_latest_version
@property
def time_upgrade_required(self):
"""
Gets the time_upgrade_required of this DeploymentSummary.
The date the existing version in use will no longer be considered as usable and an upgrade will be required. This date is typically 6 months after the version was released for use by GGS. The format is defined by `RFC3339`__, such as `2016-08-25T21:10:29.600Z`.
__ https://tools.ietf.org/html/rfc3339
:return: The time_upgrade_required of this DeploymentSummary.
:rtype: datetime
"""
return self._time_upgrade_required
@time_upgrade_required.setter
def time_upgrade_required(self, time_upgrade_required):
"""
Sets the time_upgrade_required of this DeploymentSummary.
The date the existing version in use will no longer be considered as usable and an upgrade will be required. This date is typically 6 months after the version was released for use by GGS. The format is defined by `RFC3339`__, such as `2016-08-25T21:10:29.600Z`.
__ https://tools.ietf.org/html/rfc3339
:param time_upgrade_required: The time_upgrade_required of this DeploymentSummary.
:type: datetime
"""
self._time_upgrade_required = time_upgrade_required
@property
def deployment_type(self):
"""
Gets the deployment_type of this DeploymentSummary.
The type of deployment, the value determines the exact 'type' of service executed in the Deployment. NOTE: Use of the value OGG is maintained for backward compatibility purposes. Its use is discouraged
in favor of the equivalent DATABASE_ORACLE value.
Allowed values for this property are: "OGG", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The deployment_type of this DeploymentSummary.
:rtype: str
"""
return self._deployment_type
@deployment_type.setter
def deployment_type(self, deployment_type):
"""
Sets the deployment_type of this DeploymentSummary.
The type of deployment, the value determines the exact 'type' of service executed in the Deployment. NOTE: Use of the value OGG is maintained for backward compatibility purposes. Its use is discouraged
in favor of the equivalent DATABASE_ORACLE value.
:param deployment_type: The deployment_type of this DeploymentSummary.
:type: str
"""
allowed_values = ["OGG"]
if not value_allowed_none_or_none_sentinel(deployment_type, allowed_values):
deployment_type = 'UNKNOWN_ENUM_VALUE'
self._deployment_type = deployment_type
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
the-stack_106_15692
|
# This module contains settings for pdfDocs Spider
# Parameters for type are as follows
# 1: financial statements (bao cao tai chinh)
# 23: board of directors resolutions (nghi quyet hoi dong quan tri)
# 8: explanations of business results (giai trinh ket qua kinh doanh)
# 9: corporate governance reports (bao cao quan tri)
# 2: annual reports (bao cao thuong nien) - these are very heavy on content/graphics
# 4: general meeting of shareholders resolutions (nghi quyet dai hoi co dong)
# 5: general meeting of shareholders documents (tai lieu dai hoi co dong) - rar/zip files
# 3: prospectuses (ban cao bach)
# 10: capital adequacy ratio (ty le von kha dung)
# 6: other documents (tai lieu khac)
import scraper_vietstock.spiders.models.constants as constants
import scraper_vietstock.spiders.models.utilities as utilities
name = "pdfDocs"
report_types = ["1", "23", "8", "9", "2", "4", "5", "3", "10", "6"]
data = {"url": "https://finance.vietstock.vn/data/getdocument",
"formdata": {
"code": "", # ticker
"type": "", # document type, see above
},
"headers": {
"User-Agent": constants.USER_AGENT,
"Content-Type": constants.CONTENT_TYPE
},
"cookies": {
"language": constants.LANGUAGE,
"vts_usr_lg": constants.USER_COOKIE
},
"meta": {
"ticker": "", # ticker
"ReportType": "" # document type, use this so we can use TickerCrawlSpiderMiddleware
}
}
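# Illustrative (hypothetical) use of the `data` template inside a spider: deep-copy it,
# fill in the ticker and report type, then build a POST request from it, e.g.
#
#   import copy, scrapy
#   req = copy.deepcopy(data)
#   req["formdata"]["code"] = "AAA"
#   req["formdata"]["type"] = "1"   # financial statements
#   req["meta"]["ticker"] = "AAA"
#   req["meta"]["ReportType"] = "1"
#   request = scrapy.FormRequest(url=req["url"], formdata=req["formdata"],
#                                headers=req["headers"], cookies=req["cookies"],
#                                meta=req["meta"])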
log_settings = utilities.log_settings(spiderName=name,
log_level="INFO",
log_formatter="scraper_vietstock.spiders.models.utilities.TickerSpiderLogFormatter"
)
middlewares_settings = {
'DOWNLOADER_MIDDLEWARES': {
'rotating_proxies.middlewares.RotatingProxyMiddleware': 610,
'rotating_proxies.middlewares.BanDetectionMiddleware': 620,
# 'scraper_vietstock.middlewares.TickerCrawlDownloaderMiddleware': 901,
# 'scraper_vietstock.fad_stats.TickerCrawlerStats': 850,
'scrapy.downloadermiddlewares.stats.DownloaderStats': None,
},
'SPIDER_MIDDLEWARES': {
# 'scraper_vietstock.middlewares.TickerCrawlSpiderMiddleware': 45
}
}
proxy_settings = {
'ROTATING_PROXY_LIST': constants.PRIVOXY_LOCAL_PROXY,
}
redis_key_settings = {"REDIS_START_URLS_KEY": "%(name)s:corpAZtickers"}
file_settings = {
'ITEM_PIPELINES': {'scrapy.pipelines.files.FilesPipeline': 1},
'FILES_STORE': 'LocalData/PDFs',
'FILES_URLS_FIELD': 'file_urls',
'FILES_RESULT_FIELD': 'files'
}
settings = {**log_settings, **middlewares_settings, **proxy_settings, ** redis_key_settings, **file_settings}
|
the-stack_106_15695
|
import pandas as pd
import numpy as np
import sys
import os
# ## DNA file processing
#
# Each item in the raw DNA-SNV file is (Sample_ID, gene, dna_vaf, ...), recording one mutation of one gene for a patient.
# 1. Initial screening: filter rows by the 'filter' and 'effect' columns
# 2. Collect all genes present in the table
# 3. Merge the mutation features per Sample_ID; fill with 0 if a gene is not mutated
# 4. When the same ID and the same gene have multiple mutation records, keep the highest dna_vaf; apply a cutoff of 0.05 (1 if dna_vaf > 0.05, else 0)
#
# The result is saved as DNA_VAF_GDC.csv, shape (10154, 19655)
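# For example, a sample with records (gene=TP53, dna_vaf=0.30) and (gene=KRAS, dna_vaf=0.02)
# ends up with TP53=1, KRAS=0, and 0 for every gene without a mutation record (illustrative genes).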
# In[4]:
##pf
# file_path = "GDC-PANCAN.mutect2_snv.tsv"
## yw
file_path = "/home/SENSETIME/chenfeiyang/data1/GDC_data/GDC-PANCAN.mutect2_snv.tsv"
data = pd.read_csv(file_path, sep="\t") # 3175929 rows × 11 columns
# print(data.groupby("Sample_ID"))
# data
data = data[data['filter'] == 'PASS']
mask = data[['effect']].apply(
lambda x: x.str.contains(
'inframe|frameshift|missense|splice',
regex=True
)
).any(axis=1)
data = data[mask]
# all gene
gene_names = list(sorted(set(data['gene'])))
# print(len(gene_names))
gene_map_idx = {}
for idx in range(len(gene_names)):
name = gene_names[idx]
gene_map_idx[name] = idx
sample_id_list = []
sample_dna_snv_mat = []
idx = 0
for gp in data.groupby("Sample_ID"): # 30mins
sample_id_list.append(gp[0]) # TCGA-02-0003-01A
df = gp[1]
# print(df.columns)
# ['Sample_ID', 'gene', 'chrom', 'start', 'end', 'ref', 'alt',
# Amino_Acid_Change', 'effect', 'filter', 'dna_vaf']
df = df.sort_values(by=["gene", "dna_vaf"], ascending=False)[["gene", "dna_vaf"]]
    # sorted by gene name and dna_vaf, both descending
dup_df = df.drop_duplicates(subset="gene")
    # when a gene has multiple records, the first one (highest dna_vaf after sorting) is kept by default
# sample df -> num_array
sample_dna_snv = np.zeros(len(gene_names))
for idx, row in dup_df.iterrows():
        gene_name = row["gene"]
        dna_val = row["dna_vaf"]
sample_dna_snv[gene_map_idx[gene_name]] = 1 if dna_val > 0.05 else 0
sample_dna_snv_mat.append(sample_dna_snv)
# break
if len(sample_dna_snv_mat) % 2000 == 0:
print(len(sample_dna_snv_mat))
dna_snv_df = pd.DataFrame(sample_dna_snv_mat, columns=gene_names)
dna_snv_df['sample_id'] = sample_id_list
dna_snv_df = dna_snv_df.set_index('sample_id')
print(dna_snv_df.shape)
# (10154, 19655)
dna_snv_df.to_csv(r"/home/SENSETIME/chenfeiyang/data1/data/DNA_VAF_GDC.csv")
|
the-stack_106_15697
|
"""DNS Authenticator for 1cloud.ru DNS."""
import logging
import json
import requests
import zope.interface
from certbot import errors
from certbot import interfaces
from certbot.plugins import dns_common
logger = logging.getLogger(__name__)
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
class Authenticator(dns_common.DNSAuthenticator):
"""DNS Authenticator for 1cloud DNS"""
    description = 'Obtain certificates using a DNS TXT record (if you are using 1cloud.ru for DNS).'
def __init__(self, *args, **kwargs):
super(Authenticator, self).__init__(*args, **kwargs)
self.credentials = None
@classmethod
def add_parser_arguments(cls, add, default_propagation_seconds=120):
super(Authenticator, cls).add_parser_arguments(
add, default_propagation_seconds=default_propagation_seconds)
add('credentials', help='Path to 1cloud credentials INI file',
default='/etc/letsencrypt/1cloud.ini')
def more_info(self):
return 'This plugin configures a DNS TXT record to respond to a dns-01 challenge using 1cloud API'
def _setup_credentials(self):
self.credentials = self._configure_credentials(
'credentials',
'path to 1cloud credentials INI file',
{
'token': '1cloud API token'
}
)
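        # Illustrative (hypothetical) contents of /etc/letsencrypt/1cloud.ini; the exact key
        # prefix depends on the name this plugin is registered under in its entry point
        # (certbot's dns_common derives it automatically), e.g.:
        #
        #   dns_1cloud_token = <your 1cloud API token>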
def _perform(self, domain, validation_name, validation):
self._get_1cloud_client().add_txt_record(validation_name, validation)
def _cleanup(self, domain, validation_name, validation):
self._get_1cloud_client().del_txt_record(validation_name, validation)
def _get_1cloud_client(self):
return _1CloudClient(self.credentials.conf('token'))
class DomainNotFoundError(BaseException):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class RecordNotFoundError(BaseException):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class _1CloudClient(object):
"""
1Cloud API Client Wrapper
"""
def __init__(self, token):
self._token = token
def add_txt_record(self, record_name, record_value):
"""
Creates a TXT with given record_name and record_value
:param str record_name: The record name (typically beginning with '_acme-challenge.').
:param str record_value: The record value
:raises certbot.errors.PluginError: if an error occurs communicating with the 1cloud API
"""
try:
parsed = self._split_record_name(record_name)
domain_id = self._load_domain_info(parsed['domain'])['ID']
response = requests.post('https://api.1cloud.ru/dns/recordtxt', json={
'DomainId': domain_id,
'Name': parsed['subdomain'],
'Text': record_value,
'TTL': '1'
}, headers=self._create_headers())
response.raise_for_status()
except requests.RequestException as e:
            logger.error('Encountered error adding TXT record: %s', e)
raise errors.PluginError(
'Error communicating with 1cloud API: {0}'.format(e))
except DomainNotFoundError as e:
            logger.error('Encountered error adding TXT record: %s', e)
raise errors.PluginError(
'Error communicating with 1cloud API: {0}'.format(e))
def del_txt_record(self, record_name, record_value):
"""
Creates a TXT with given record_name and record_value
:param str record_name: The record name (typically beginning with '_acme-challenge.').
:param str record_value: The record value
:raises certbot.errors.PluginError: if an error occurs communicating with the 1cloud API
"""
try:
parsed = self._split_record_name(record_name)
domain_info = self._load_domain_info(parsed['domain'])
text_1cloud_value = '"' + record_value + '"'
for record in domain_info['LinkedRecords']:
if record['TypeRecord'] == 'TXT' and record['HostName'] == record_name + '.' and record['Text'].strip() == text_1cloud_value:
record_id = record['ID']
domain_id = domain_info['ID']
response = requests.delete(
'https://api.1cloud.ru/dns/{0}/{1}'.format(domain_id, record_id), headers=self._create_headers())
response.raise_for_status()
return
raise RecordNotFoundError()
except requests.RequestException as e:
            logger.error('Encountered error removing TXT record: %s', e)
raise errors.PluginError(
'Error communicating with 1cloud API: {0}'.format(e))
except RecordNotFoundError as e:
            logger.error('Encountered error removing TXT record: %s', e)
raise errors.PluginError(
'Error communicating with 1cloud API: {0}'.format(e))
except DomainNotFoundError as e:
            logger.error('Encountered error removing TXT record: %s', e)
raise errors.PluginError(
'Error communicating with 1cloud API: {0}'.format(e))
def _create_headers(self):
return {
'Authorization': 'Bearer {0}'.format(self._token)
}
@classmethod
def _split_record_name(cls, record_name):
pieces = record_name.split('.')
return {
'domain': '.'.join(pieces[-2:]),
'subdomain': '.'.join(pieces[:-2])
}
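        # For example, '_acme-challenge.example.com' is split into
        # {'domain': 'example.com', 'subdomain': '_acme-challenge'}; note this simple
        # split assumes a two-label registered domain (e.g. 'example.co.uk' would be mis-split).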
def _load_domain_info(self, domain):
response = requests.get(
'https://api.1cloud.ru/dns', headers=self._create_headers())
response.raise_for_status()
data = response.json()
for info in data:
if info['Name'] == domain:
return info
raise DomainNotFoundError()
|
the-stack_106_15698
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Navcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import NavCoinTestFramework
from test_framework.cfund_util import *
import time
import http.client
import urllib.parse
class CommunityFundPaymentRequestDuplicate(NavCoinTestFramework):
"""Tests the payment request procedures of the Community fund."""
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self, split=False):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, []))
self.nodes.append(start_node(1, self.options.tmpdir, []))
connect_nodes(self.nodes[0], 1)
self.is_network_split = split
def run_test(self):
activate_cfund(self.nodes[0])
self.nodes[0].donatefund(100000)
sync_blocks(self.nodes)
balance = self.nodes[0].getbalance()
balanceSplit = float(balance / self.num_nodes)
addresses = []
for x in range(self.num_nodes):
addresses.append(self.nodes[x].getnewaddress())
self.nodes[0].sendtoaddress(addresses[x], balanceSplit)
slow_gen(self.nodes[0], 1)
sync_blocks(self.nodes)
for x in range(self.num_nodes):
assert(self.nodes[x].getbalance() >= balanceSplit)
SATOSHI = 100000000
proposals = [{},{},{},{},{}]
# internships
proposals[0]["address"] = self.nodes[0].getnewaddress()
proposals[0]["amount"] = 10000
proposals[0]["preqs"] = [{"amount": 3333}]
# beekart posters
proposals[1]["address"] = self.nodes[0].getnewaddress()
proposals[1]["amount"] = 5000
proposals[1]["preqs"] = [{"amount": 5000}]
# vinyl stickers
proposals[2]["address"] = self.nodes[0].getnewaddress()
proposals[2]["amount"] = 2000
proposals[2]["preqs"] = [{"amount": 2000}]
# nio italia
proposals[3]["address"] = self.nodes[0].getnewaddress()
proposals[3]["amount"] = 1000
proposals[3]["preqs"] = [{"amount": 1000}]
# cryptocandor video
proposals[4]["address"] = self.nodes[0].getnewaddress()
proposals[4]["amount"] = 5500
proposals[4]["preqs"] = [{"amount": 2500}, {"amount": 2000}]
# Create proposals
for proposal in proposals:
proposal["proposalHash"] = self.nodes[0].createproposal(proposal["address"], proposal["amount"], 36000, "proposal description")["hash"]
end_cycle(self.nodes[0])
sync_blocks(self.nodes)
# Accept the proposals
for proposal in proposals:
self.nodes[0].proposalvote(proposal["proposalHash"], "yes")
slow_gen(self.nodes[0], 1)
end_cycle(self.nodes[0])
locked_accepted = self.nodes[0].cfundstats()["funds"]["locked"]
sync_blocks(self.nodes)
# Check proposals are accepted on all nodes
for x in range(self.num_nodes):
locked_tallied = 0
for proposal in proposals:
assert(self.nodes[x].getproposal(proposal["proposalHash"])["state"] == 1)
assert(self.nodes[x].getproposal(proposal["proposalHash"])["status"] == "accepted")
locked_tallied += float(self.nodes[x].getproposal(proposal["proposalHash"])["requestedAmount"])
assert(locked_accepted == locked_tallied)
# Create payment requests for all the proposals
for proposal in proposals:
for preq in proposal["preqs"]:
preq["hash"] = self.nodes[0].createpaymentrequest(proposal["proposalHash"], preq["amount"], "payment request description")["hash"]
slow_gen(self.nodes[0], 1)
sync_blocks(self.nodes)
# Check payment requests are present on all nodes
for x in range(self.num_nodes):
for proposal in proposals:
for preq in proposal["preqs"]:
assert(self.nodes[x].getpaymentrequest(preq["hash"])["state"] == 0)
assert(self.nodes[x].getpaymentrequest(preq["hash"])["status"] == "pending")
assert(self.nodes[x].cfundstats()["funds"]["locked"] == locked_accepted)
end_cycle(self.nodes[0])
# vote yes for the payment requests
for proposal in proposals:
for preq in proposal["preqs"]:
self.nodes[0].paymentrequestvote(preq["hash"], "yes")
slow_gen(self.nodes[0], 1)
end_cycle(self.nodes[0])
sync_blocks(self.nodes)
for x in range(self.num_nodes):
for proposal in proposals:
for preq in proposal["preqs"]:
assert(self.nodes[x].getpaymentrequest(preq["hash"])["state"] == 1)
assert(self.nodes[x].getpaymentrequest(preq["hash"])["paidOnBlock"] == "0000000000000000000000000000000000000000000000000000000000000000")
wallet_info1 = self.nodes[0].getwalletinfo()
while self.nodes[0].getpaymentrequest(proposals[0]["preqs"][0]["hash"])["paidOnBlock"] == "0000000000000000000000000000000000000000000000000000000000000000":
blocks = slow_gen(self.nodes[0], 1)
sync_blocks(self.nodes)
wallet_info2 = self.nodes[0].getwalletinfo()
# check all wallets see the payout
for x in range(self.num_nodes):
paymentsFound = 0
preqsFound = 0
for proposal in proposals:
for preq in proposal["preqs"]:
preqsFound += 1
payoutBlockHash = self.nodes[x].getpaymentrequest(preq["hash"])["paidOnBlock"]
payoutBlock = self.nodes[x].getblock(payoutBlockHash)
payoutHex = self.nodes[x].getrawtransaction(payoutBlock["tx"][0])
payoutTx = self.nodes[x].decoderawtransaction(payoutHex)
for vout in payoutTx["vout"]:
if vout["scriptPubKey"]["addresses"][0] == proposal["address"] and vout["valueSat"] == preq["amount"] * SATOSHI:
paymentsFound += 1
assert(paymentsFound == 6)
assert(preqsFound == 6)
# check node zero received the payout
lastTransactions = self.nodes[0].listtransactions("", 7)
# print(lastTransactions)
txFound = 0
preqsFound = 0
for tx in lastTransactions:
for proposal in proposals:
for preq in proposal["preqs"]:
preqsFound += 1
if tx["address"] == proposal["address"] and int(tx["amount"] * SATOSHI) == int(preq["amount"] * SATOSHI):
assert(tx["category"] == "immature")
assert(tx["confirmations"] == 1)
txFound += 1
assert(txFound == 6)
# disconnect the nodes mine blocks on each
url = urllib.parse.urlparse(self.nodes[1].url)
self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
blocks0 = self.nodes[0].getblockchaininfo()["blocks"]
blocks1 = self.nodes[1].getblockchaininfo()["blocks"]
slow_gen(self.nodes[0], 2)
slow_gen(self.nodes[1], 1)
assert(self.nodes[0].getblockchaininfo()["blocks"] == blocks0 + 2)
assert(self.nodes[1].getblockchaininfo()["blocks"] == blocks1 + 1)
assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash())
# reconnect the node making node 1 reorg
connect_nodes_bi(self.nodes,0,1)
sync_blocks(self.nodes)
assert(self.nodes[0].getblockchaininfo()["blocks"] == blocks0 + 2)
assert(self.nodes[1].getblockchaininfo()["blocks"] == blocks1 + 2)
assert(self.nodes[0].getbestblockhash() == self.nodes[1].getbestblockhash())
slow_gen(self.nodes[1], 1)
bestblockHash = self.nodes[1].getbestblockhash()
bestBlock = self.nodes[1].getblock(bestblockHash)
# Check that the only tx in the block is the block reward
assert(len(bestBlock["tx"]) == 1)
if __name__ == '__main__':
CommunityFundPaymentRequestDuplicate().main()
|
the-stack_106_15699
|
#
# Copyright Cloudlab URV 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import ssl
import json
import time
import logging
import shutil
import zipfile
import http.client
from urllib.parse import urlparse
from azure.storage.queue import QueueServiceClient
from lithops import utils
from lithops.version import __version__
from lithops.constants import COMPUTE_CLI_MSG, TEMP_DIR
from . import config
logger = logging.getLogger(__name__)
class AzureFunctionAppBackend:
"""
A wrap-up around Azure Function Apps backend.
"""
def __init__(self, af_config, internal_storage):
logger.debug("Creating Azure Functions client")
self.name = 'azure_functions'
self.type = 'faas'
self.af_config = af_config
self.invocation_type = af_config['invocation_type']
self.resource_group = af_config['resource_group']
self.storage_account_name = af_config['storage_account_name']
self.storage_account_key = af_config['storage_account_key']
self.location = af_config['location']
self.functions_version = self.af_config['functions_version']
self.queue_service_url = f'https://{self.storage_account_name}.queue.core.windows.net'
self.queue_service = QueueServiceClient(account_url=self.queue_service_url,
credential=self.storage_account_key)
msg = COMPUTE_CLI_MSG.format('Azure Functions')
logger.info(f"{msg} - Location: {self.location}")
def _format_function_name(self, runtime_name, runtime_memory=None):
"""
Formates the function name
"""
ver = __version__.replace('.', '-')
function_name = f'{self.storage_account_name}-{runtime_name}-{ver}-{self.invocation_type}'
return function_name
def _format_queue_name(self, function_name, q_type):
return function_name.replace('--', '-') + '-' + q_type
def _get_default_runtime_name(self):
"""
Generates the default runtime name
"""
py_version = utils.CURRENT_PY_VERSION.replace('.', '')
return f'lithops-default-runtime-v{py_version}'
def deploy_runtime(self, runtime_name, memory, timeout):
"""
Deploys a new runtime into Azure Function Apps
from the provided Linux image for consumption plan
"""
if runtime_name == self._get_default_runtime_name():
self._build_default_runtime(runtime_name)
logger.info(f"Deploying runtime: {runtime_name} - Memory: {memory} Timeout: {timeout}")
self._create_function(runtime_name, memory, timeout)
metadata = self._generate_runtime_meta(runtime_name, memory)
return metadata
def _build_default_runtime(self, runtime_name):
"""
Builds the default runtime
"""
requirements_file = os.path.join(TEMP_DIR, 'az_default_requirements.txt')
with open(requirements_file, 'w') as reqf:
reqf.write(config.REQUIREMENTS_FILE)
try:
self.build_runtime(runtime_name, requirements_file)
finally:
os.remove(requirements_file)
def build_runtime(self, runtime_name, requirements_file, extra_args=[]):
logger.info(f'Building runtime {runtime_name} from {requirements_file}')
try:
shutil.rmtree(config.BUILD_DIR)
except Exception:
pass
function_name = self._format_function_name(runtime_name)
build_dir = os.path.join(config.BUILD_DIR, function_name)
os.makedirs(build_dir, exist_ok=True)
action_dir = os.path.join(build_dir, config.ACTION_DIR)
os.makedirs(action_dir, exist_ok=True)
logger.debug(f'Building runtime in {build_dir}')
with open(requirements_file, 'r') as req_file:
req_data = req_file.read()
req_file = os.path.join(build_dir, 'requirements.txt')
with open(req_file, 'w') as reqf:
reqf.write(req_data)
if not utils.is_unix_system():
if 'dev' in __version__:
reqf.write('git+https://github.com/lithops-cloud/lithops')
else:
reqf.write(f'lithops=={__version__}')
host_file = os.path.join(build_dir, 'host.json')
with open(host_file, 'w') as hstf:
hstf.write(config.HOST_FILE)
fn_file = os.path.join(action_dir, 'function.json')
if self.invocation_type == 'event':
with open(fn_file, 'w') as fnf:
in_q_name = self._format_queue_name(function_name, config.IN_QUEUE)
config.BINDINGS_QUEUE['bindings'][0]['queueName'] = in_q_name
out_q_name = self._format_queue_name(function_name, config.OUT_QUEUE)
config.BINDINGS_QUEUE['bindings'][1]['queueName'] = out_q_name
fnf.write(json.dumps(config.BINDINGS_QUEUE))
elif self.invocation_type == 'http':
with open(fn_file, 'w') as fnf:
fnf.write(json.dumps(config.BINDINGS_HTTP))
entry_point = os.path.join(os.path.dirname(__file__), 'entry_point.py')
main_file = os.path.join(action_dir, '__init__.py')
shutil.copy(entry_point, main_file)
if utils.is_unix_system():
mod_dir = os.path.join(build_dir, config.ACTION_MODULES_DIR)
os.chdir(build_dir)
cmd = f'{sys.executable} -m pip install -U -t {mod_dir} -r requirements.txt'
utils.run_command(cmd)
utils.create_handler_zip(config.FH_ZIP_LOCATION, entry_point, '__init__.py')
archive = zipfile.ZipFile(config.FH_ZIP_LOCATION)
archive.extractall(path=mod_dir)
os.remove(mod_dir+'/__init__.py')
os.remove(config.FH_ZIP_LOCATION)
logger.debug(f'Runtime {runtime_name} built successfully')
def _create_function(self, runtime_name, memory, timeout):
"""
Create and publish an Azure Functions
"""
logger.info(f'Creating Azure Function from runtime {runtime_name}')
function_name = self._format_function_name(runtime_name, memory)
if self.invocation_type == 'event':
try:
in_q_name = self._format_queue_name(function_name, config.IN_QUEUE)
logger.debug(f'Creating queue {in_q_name}')
self.queue_service.create_queue(in_q_name)
except Exception:
in_queue = self.queue_service.get_queue_client(in_q_name)
in_queue.clear_messages()
try:
out_q_name = self._format_queue_name(function_name, config.OUT_QUEUE)
logger.debug(f'Creating queue {out_q_name}')
self.queue_service.create_queue(out_q_name)
except Exception:
out_queue = self.queue_service.get_queue_client(out_q_name)
out_queue.clear_messages()
cmd = (f'az functionapp create --name {function_name} '
f'--storage-account {self.storage_account_name} '
f'--resource-group {self.resource_group} '
'--os-type Linux --runtime python '
f'--runtime-version {utils.CURRENT_PY_VERSION} '
f'--functions-version {self.functions_version} '
f'--consumption-plan-location {self.location} '
f'--tags type=lithops-runtime lithops_version={__version__} runtime_name={runtime_name}')
utils.run_command(cmd)
time.sleep(10)
logger.debug(f'Publishing function: {function_name}')
build_dir = os.path.join(config.BUILD_DIR, function_name)
os.chdir(build_dir)
if utils.is_unix_system():
cmd = f'func azure functionapp publish {function_name} --python --no-build'
else:
cmd = f'func azure functionapp publish {function_name} --python'
while True:
try:
utils.run_command(cmd)
break
            except Exception as e:
                # publishing can fail while the Function App is still provisioning; wait and retry
                logger.debug('Publish attempt failed (%s), retrying in 10 seconds', e)
                time.sleep(10)
time.sleep(10)
def delete_runtime(self, runtime_name, memory):
"""
Deletes a runtime
"""
logger.info(f'Deleting runtime: {runtime_name} - {memory}MB')
function_name = self._format_function_name(runtime_name, memory)
cmd = f'az functionapp delete --name {function_name} --resource-group {self.resource_group}'
utils.run_command(cmd)
try:
in_q_name = self._format_queue_name(function_name, config.IN_QUEUE)
self.queue_service.delete_queue(in_q_name)
except Exception:
pass
try:
out_q_name = self._format_queue_name(function_name, config.OUT_QUEUE)
self.queue_service.delete_queue(out_q_name)
except Exception:
pass
def invoke(self, docker_image_name, memory=None, payload={}, return_result=False):
"""
Invoke function
"""
function_name = self._format_function_name(docker_image_name, memory)
if self.invocation_type == 'event':
in_q_name = self._format_queue_name(function_name, config.IN_QUEUE)
in_queue = self.queue_service.get_queue_client(in_q_name)
msg = in_queue.send_message(utils.dict_to_b64str(payload))
activation_id = msg.id
if return_result:
out_q_name = self._format_queue_name(function_name, config.OUT_QUEUE)
out_queue = self.queue_service.get_queue_client(out_q_name)
msg = []
while not msg:
time.sleep(1)
msg = out_queue.receive_message()
out_queue.clear_messages()
return utils.b64str_to_dict(msg.content)
elif self.invocation_type == 'http':
endpoint = f"https://{function_name}.azurewebsites.net"
parsed_url = urlparse(endpoint)
ctx = ssl._create_unverified_context()
conn = http.client.HTTPSConnection(parsed_url.netloc, context=ctx)
route = "/api/lithops_handler"
if return_result:
conn.request("GET", route, body=json.dumps(payload, default=str))
resp = conn.getresponse()
data = json.loads(resp.read().decode("utf-8"))
conn.close()
return data
else:
# logger.debug('Invoking calls {}'.format(', '.join(payload['call_ids'])))
conn.request("POST", route, body=json.dumps(payload, default=str))
resp = conn.getresponse()
if resp.status == 429:
time.sleep(0.2)
conn.close()
return None
activation_id = resp.read().decode("utf-8")
conn.close()
return activation_id
def get_runtime_key(self, docker_image_name, runtime_memory):
"""
Method that creates and returns the runtime key.
Runtime keys are used to uniquely identify runtimes within the storage,
in order to know which runtimes are installed and which not.
"""
function_name = self._format_function_name(docker_image_name, runtime_memory)
runtime_key = os.path.join(self.name, __version__, function_name)
return runtime_key
def clean(self):
"""
Deletes all Lithops Azure Function Apps runtimes
"""
logger.debug('Deleting all runtimes')
runtimes = self.list_runtimes()
for runtime_name, runtime_memory, version in runtimes:
self.delete_runtime(runtime_name, runtime_memory)
def _generate_runtime_meta(self, docker_image_name, memory):
"""
Extract metadata from Azure runtime
"""
logger.info(f"Extracting metadata from: {docker_image_name}")
payload = {'log_level': logger.getEffectiveLevel(), 'get_metadata': True}
try:
runtime_meta = self.invoke(docker_image_name, memory=memory,
payload=payload, return_result=True)
except Exception as e:
raise Exception(f"Unable to extract metadata: {e}")
if not runtime_meta or 'preinstalls' not in runtime_meta:
raise Exception(runtime_meta)
logger.debug("Extracted metadata succesfully")
return runtime_meta
def list_runtimes(self, runtime_name='all'):
"""
List all the Azure Function Apps deployed.
return: Array of tuples (function_name, memory)
"""
logger.debug('Listing all functions deployed...')
runtimes = []
response = os.popen('az functionapp list --query "[].{Name:name, Tags:tags}\"').read()
response = json.loads(response)
for function in response:
if function['Tags'] and 'type' in function['Tags'] \
and function['Tags']['type'] == 'lithops-runtime':
version = function['Tags']['lithops_version']
runtime = function['Tags']['runtime_name']
if runtime_name == function['Name'] or runtime_name == 'all':
runtimes.append((runtime, 'shared', version))
return runtimes
def get_runtime_info(self):
"""
Method that returns all the relevant information about the runtime set
in config
"""
if utils.CURRENT_PY_VERSION not in config.AVAILABLE_PY_RUNTIMES:
raise Exception(f'Python {utils.CURRENT_PY_VERSION} is not available for '
f'Azure Functions. Please use one of {config.AVAILABLE_PY_RUNTIMES}')
if 'runtime' not in self.af_config or self.af_config['runtime'] == 'default':
self.af_config['runtime'] = self._get_default_runtime_name()
runtime_info = {
'runtime_name': self.af_config['runtime'],
'runtime_memory': self.af_config['runtime_memory'],
'runtime_timeout': self.af_config['runtime_timeout'],
'max_workers': self.af_config['max_workers'],
}
return runtime_info
|
the-stack_106_15701
|
"""AWS Lambda function to support an Alexa skill that opens and closes one or two garage doors.
Pymyq provides access to the Chamberlain API.
The Alexa interaction model assumes:
- door states in the API contain all the DoorState values
- door commands in the API contain the DoorCommand values
See https://github.com/arraylabs/pymyq/blob/master/pymyq/device.py
"""
import asyncio
import logging
from typing import TYPE_CHECKING
import pymyq
from aiohttp import ClientSession
from environs import Env
from pymyq.api import API
if TYPE_CHECKING:
from pymyq.garagedoor import MyQGaragedoor
VERSION = '1.0.5'
# load system env vars and read .env (set override=True for .env values to override existing vars)
env = Env()
env.read_env(override=False)
logger = logging.getLogger()
logger.setLevel(env.log_level('LOG_LEVEL', logging.INFO))
class InputException(Exception):
"""Signifies input that was not understood"""
pass
class GarageRequestHandler:
"""Handle a request by the garage skill"""
myq: API
user_name: str
password: str
# The order of the doors returned in the MyQ covers dictionary.
# Using order (while arbitrary) is simpler than specifying the names of the left and right doors.
left_door: int
right_door: int
# By default, the skill will only close the door. Set env var ONLY_CLOSE to 'False' to also allow opening.
only_close: bool
def __init__(self):
self.validate_env()
# information messages that may be modified if there is only one door
self.move_msg = 'close the left or right door'
if not self.only_close:
self.move_msg = 'open or ' + self.move_msg
self.check_msg = "check the state of your garage door by asking what's up"
self.check1_msg = 'check the state of your garage door by asking if the left or right door is open'
def validate_env(self) -> None:
"""Make sure environment is set up correctly. Else raise an exception."""
errors = []
self.user_name = env.str('USER_NAME')
if not self.user_name:
errors.append('USER_NAME environment variable needs to be set to your MyQ user name')
self.password = env.str('PASSWORD')
if not self.password:
errors.append('PASSWORD environment variable needs to be set to your MyQ password')
self.left_door = env.int('LEFT', 0)
self.right_door = 1 - self.left_door
self.only_close = env.bool('ONLY_CLOSE', True)
if errors:
raise Exception(','.join(errors))
# see https://developer.amazon.com/blogs/alexa/post/
# 5882651c-6377-4bc7-bfd7-0fd661d95abc/entity-resolution-in-skill-builder
@staticmethod
def slot_value_id(intent, slot):
try:
return intent['slots'][slot]['resolutions']['resolutionsPerAuthority'][0]['values'][0]['value']['id']
except KeyError as e:
# if the input couldn't be parsed, there will be no values
if str(e) == "'values'":
raise InputException(slot) from None
else:
raise
def has_one_door(self):
return len(self.myq.covers) == 1
def get_door(self, device_ind: int) -> 'MyQGaragedoor':
return list(self.myq.covers.values())[device_ind]
def get_door_index(self, door_name: str) -> int:
"""Convert a door name to an index"""
if door_name == 'left':
return self.left_door
elif door_name == 'right':
return self.right_door
elif door_name == 'both':
return 0
else:
return int(door_name) - 1
def status(self, device_ind: int) -> str:
door = self.get_door(device_ind)
logger.info(f'Check door state: {door.name} ({device_ind}) is {door.state}')
return door.state
async def open_door(self, device_ind: int) -> None:
door = self.get_door(device_ind)
logger.info(f'Change door state: {door.name} ({device_ind}) is {door.state}')
await door.open()
async def close_door(self, device_ind: int) -> None:
door = self.get_door(device_ind)
logger.info(f'Change door state: {door.name} ({device_ind}) is {door.state}')
await door.close()
# Called when the user launches the skill without specifying what they want.
def on_launch(self) -> dict:
return self.get_welcome_response()
# Called when the user specifies an intent for this skill.
async def on_intent(self, intent: dict) -> dict:
intent_name = intent['name']
if intent_name == 'StateIntent':
return self.execute_state_intent(intent)
elif intent_name == 'AllStatesIntent':
if self.has_one_door():
return self.execute_state1_intent()
else:
return self.execute_all_states_intent()
elif intent_name == 'MoveIntent':
return await self.execute_move_intent(intent)
elif intent_name == 'AMAZON.HelpIntent':
return self.get_welcome_response()
elif intent_name in ('AMAZON.StopIntent', 'AMAZON.CancelIntent'):
return self.execute_stop_intent()
else:
raise Exception(f"Invalid Intent ('{intent_name}')")
# Called when the user ends the session.
# Is not called when the skill returns should_end_session=true.
@staticmethod
def on_session_ended() -> dict:
# Add cleanup logic here
logger.info('Session ended')
return {}
def get_welcome_response(self) -> dict:
speech_output = f'You can {self.move_msg}. You can also {self.check_msg}.'
return self.build_speechlet_response('Welcome', speech_output)
async def execute_move_intent(self, intent: dict) -> dict:
# Ask garage {door|door 1|door 2} to {open|close|shut}
# "intent": {
# "name": "StateIntent",
# "slots": {
# "Name": {
# "name": "Name",
# "value": "1"
# }
# "Command": {
# "name": "Command",
# "value": "close"
# }
# }
# }
failure_msg = f"I didn't understand that. You can say {self.move_msg}."
reprompt_msg = f'Ask me to {self.move_msg}.'
try:
door_name = intent['slots']['Name']['value']
door_name_id = self.slot_value_id(intent, 'Name')
door_action_id = self.slot_value_id(intent, 'Command')
if door_name_id == 'both' and not self.has_one_door():
if door_action_id == 'close':
return await self.execute_close_all_intent()
else:
return await self.execute_open_all_intent()
device_ind = self.get_door_index(door_name_id)
door_state = self.status(device_ind)
if door_action_id == 'close':
card_title = 'Close door'
if door_state in ('closed', 'closing'):
speech_output = f'{door_name} is already {door_state}'
else:
await self.close_door(device_ind)
speech_output = f'Ok, closing {door_name} now'
else:
card_title = 'Open door'
if door_state in ('open', 'opening'):
speech_output = f'{door_name} is already {door_state}'
elif self.only_close:
speech_output = 'Sorry, I can only close the door'
card_title = 'Try again'
else:
await self.open_door(device_ind)
speech_output = f'Ok, opening {door_name} now'
return self.build_speechlet_response(card_title, speech_output)
except InputException:
logger.exception(f'Error executing {intent}')
return self.build_speechlet_response('Try again', failure_msg, reprompt_msg)
async def execute_open_all_intent(self):
# Open all doors
door_state_left = self.status(self.left_door)
door_state_right = self.status(self.right_door)
card_title = 'Open doors'
if door_state_left not in ['open', 'opening']:
await self.open_door(self.left_door)
if door_state_right not in ['open', 'opening']:
await self.open_door(self.right_door)
if door_state_left not in ['open', 'opening'] and door_state_right not in ['open', 'opening']:
speech_output = 'Ok, opening both garage doors now'
elif door_state_left not in ['open', 'opening']:
speech_output = 'Ok, opening the left garage door now'
elif door_state_right not in ['open', 'opening']:
speech_output = 'Ok, opening the right garage door now'
else:
speech_output = 'Both doors are open'
return self.build_speechlet_response(card_title, speech_output)
async def execute_close_all_intent(self):
# Close all doors
door_state_left = self.status(self.left_door)
door_state_right = self.status(self.right_door)
card_title = 'Close doors'
if door_state_left not in ['closed', 'closing']:
await self.close_door(self.left_door)
if door_state_right not in ['closed', 'closing']:
await self.close_door(self.right_door)
if door_state_left not in ['closed', 'closing'] and door_state_right not in ['closed', 'closing']:
speech_output = 'Ok, closing both garage doors now'
elif door_state_left not in ['closed', 'closing']:
speech_output = 'Ok, closing the left garage door now'
elif door_state_right not in ['closed', 'closing']:
speech_output = 'Ok, closing the right garage door now'
else:
speech_output = 'Both doors are closed'
return self.build_speechlet_response(card_title, speech_output)
def execute_state_intent(self, intent: dict) -> dict:
# Ask garage if {door|door 1|door 2} is {open|up|closed|shut|down}
# 'intent': {
# 'name': 'StateIntent',
# 'slots': {
# 'Name': {
# 'name': 'name',
# 'value': '1'
# }
# 'State': {
# 'name': 'state',
# 'value': 'closed'
# }
# }
# }
failure_msg = f"I didn't understand that. You can {self.check1_msg}."
reprompt_msg = f'Ask me to {self.check1_msg}.'
try:
door_name = intent['slots']['Name']['value']
door_name_id = self.slot_value_id(intent, 'Name')
device_ind = self.get_door_index(door_name_id)
door_state = intent['slots']['State']['value']
actual_door_state = self.status(device_ind)
if not door_state:
speech_output = f'{door_name} is {actual_door_state}'
else:
door_state_id = self.slot_value_id(intent, 'State')
if door_state_id == actual_door_state:
speech_output = f'Yes, {door_name} is {door_state}'
else:
speech_output = f'No, {door_name} is {actual_door_state}'
card_title = 'Check door status'
return self.build_speechlet_response(card_title, speech_output)
except InputException:
logger.exception(f'Error executing {intent}')
return self.build_speechlet_response('Try again', failure_msg, reprompt_msg)
def execute_all_states_intent(self) -> dict:
# Ask garage what's up
door_state_left = self.status(self.left_door)
door_state_right = self.status(self.right_door)
if door_state_left == door_state_right:
speech_output = f'Both doors are {door_state_left}'
else:
speech_output = f'The left door is {door_state_left}, and the right door is {door_state_right}.'
card_title = 'Check door status'
return self.build_speechlet_response(card_title, speech_output)
def execute_state1_intent(self) -> dict:
# Ask garage what's up when there's one door
door_state = self.status(0)
speech_output = f'The door is {door_state}.'
card_title = 'Check door status'
return self.build_speechlet_response(card_title, speech_output)
def execute_stop_intent(self) -> dict:
# Cancel or stop
return self.build_speechlet_response('Goodbye', 'Goodbye')
# --------------- Helpers that build all of the responses -----------------------
@staticmethod
def build_speechlet_response(title: str, output: str, reprompt_text: str = '') -> dict:
# If reprompt_text is available and the user either does not reply or says something
# that is not understood, they will be prompted again with the reprompt_text.
should_end_session = not reprompt_text
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': f'MyQ - {title}',
'content': output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'should_end_session': should_end_session
}
@staticmethod
def build_response(session_attributes, speechlet_response) -> dict:
return {
'version': '1.0',
'session_attributes': session_attributes,
'response': speechlet_response
}
async def process_with_session(self, event: dict, http_session: ClientSession) -> dict:
"""Process the event and return a speechlet"""
self.myq = await pymyq.login(self.user_name, self.password, http_session)
if self.has_one_door():
self.move_msg = self.move_msg.replace(' left or right', '')
self.check_msg = self.check1_msg = self.check1_msg.replace(' left or right', '')
if event['session']['new']:
logger.info(f"New session: request_id={event['request']['requestId']}, "
f"sessionId={event['session']['sessionId']}")
request_type = event['request']['type']
if request_type == 'LaunchRequest':
return self.on_launch()
elif request_type == 'IntentRequest':
return await self.on_intent(event['request']['intent'])
elif request_type == 'SessionEndedRequest':
return self.on_session_ended()
else:
logger.error(f'Unknown request type: {request_type}')
raise InputException(request_type)
# noinspection PyBroadException
async def process(self, event: dict) -> dict:
"""Create the aiohttp session and run"""
try:
async with ClientSession() as http_session:
speechlet = await self.process_with_session(event, http_session)
except Exception:
logger.exception(f'Error executing {event}')
speechlet = self.build_speechlet_response('Try again', 'Sorry. There was an error processing your request')
# Not using sessions for now
session_attributes = {}
# Return a response for speech output
return self.build_response(session_attributes, speechlet)
def lambda_handler(event: dict, _context=None) -> dict:
logger.info(f'Alexa-PyMyQ {VERSION}')
logger.debug(f'Event: {event}')
handler = GarageRequestHandler()
return asyncio.get_event_loop().run_until_complete(handler.process(event))
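# Local smoke test sketch (hypothetical event shape; a real Alexa request carries more fields,
# and USER_NAME/PASSWORD must be set for the MyQ login to succeed):
# if __name__ == '__main__':
#     sample_event = {'session': {'new': True, 'sessionId': 'local'},
#                     'request': {'type': 'LaunchRequest', 'requestId': 'local'}}
#     print(lambda_handler(sample_event))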
|
the-stack_106_15702
|
# -*- coding: utf-8 -*-
import fire
import logging
import sys
from collections import defaultdict
import numpy as np
from gensim import matutils
from gensim.models.ldamulticore import LdaMulticore
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from gensim.models.ldamodel import LdaModel
class TwentyNewsGroup(object):
"""
TwentyNewsGroup corpus helper
"""
def __init__(self):
self.data = defaultdict(list)
self.count_vect = CountVectorizer()
def fetch_data(self, subset='train', categories=None):
"""return data
执行数据抓取操作
Arguments:
subset -> string -- 抓取的目标集合 train / test / all
"""
rand = np.random.mtrand.RandomState(8675309)
data = fetch_20newsgroups(subset=subset,
categories=categories,
shuffle=True,
random_state=rand)
self.data[subset] = data
def fetch_data_and_dump(self, subset='train', categories=None, output_path='./20newsgroups.txt'):
"""
Fetch the data and persist it to disk
Arguments:
subset -> string -- target subset to fetch: train / test / all
output_path -> string -- file path the corpus is dumped to
"""
def extract_feature(self):
"""
Extract document features from the corpus
"""
# Build the document-term matrix for the training data
self.train_dtm = self.count_vect.fit_transform(self.data['train'].data)
# Compute the TF features of the documents
tf_transformer = TfidfTransformer(use_idf=False)
self.train_tf = tf_transformer.fit_transform(self.train_dtm)
# Compute the TF-IDF features of the documents
tfidf_transformer = TfidfTransformer().fit(self.train_dtm)
self.train_tfidf = tfidf_transformer.transform(self.train_dtm)
def train_classifier(self):
"""
Train a Naive Bayes classifier on the training set
"""
self.extract_feature()
self.clf = MultinomialNB().fit(
self.train_tfidf, self.data['train'].target)
def predict(self, docs):
"""
Predict the categories of the given documents
"""
X_new_counts = self.count_vect.transform(docs)
tfidf_transformer = TfidfTransformer().fit(X_new_counts)
X_new_tfidf = tfidf_transformer.transform(X_new_counts)
return self.clf.predict(X_new_tfidf)
def topics_by_lda(self, num_topics=20, num_words=10):
"""
Analyse the corpus with an LDA topic model
Arguments:
num_topics -> integer -- number of topics to extract
num_words -> integer -- number of words to return per topic
"""
# Fetch the data first if it has not been prepared yet (e.g. when launched from the command line)
if not self.data['train']:
logging.info("Dataset is not ready yet, fetching it now!")
self.fetch_data()
# Build the count vectorizer for the corpus
vec = CountVectorizer(min_df=10, max_df=80, stop_words='english')
# Vectorize the training documents
X = vec.fit_transform(self.data['train'].data)
# Get the vocabulary
vocab = vec.get_feature_names()
# Build the LDA model
lda = LdaModel(
matutils.Sparse2Corpus(X, documents_columns=False),
num_topics=num_topics,
id2word=dict([(i, s) for i, s in enumerate(vocab)])
)
# Print and return the topic data
topics = lda.show_topics(
num_topics=num_topics,
num_words=num_words,
formatted=False,
log=False)
for ti, topic in enumerate(topics):
print("Topic", ti, ":", " ".join(word[0] for word in topic[1]))
if __name__ != '__main__':
return topics
if __name__ == '__main__':
logging.basicConfig(
format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
fire.Fire(TwentyNewsGroup)
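# Example CLI usage (the file name is illustrative):
# python twenty_newsgroup.py fetch_data
# python twenty_newsgroup.py topics_by_lda --num_topics 10 --num_words 5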
|
the-stack_106_15703
|
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype
from dfirtrack_config.models import Workflow, WorkflowDefaultArtifactAttributes
class WorkflowDefaultArtifactAttributesModelTestCase(TestCase):
"""workflow model tests"""
@classmethod
def setUpTestData(cls):
# create objects
artifacttype_1 = Artifacttype.objects.create(artifacttype_name='artifacttype_1')
artfactstatus_1 = Artifactstatus.objects.get(
artifactstatus_name='10_needs_analysis'
)
artfactpriority_1 = Artifactpriority.objects.get(artifactpriority_name='10_low')
test_user = User.objects.create_user(
username='testuser_workflow_artifact_default_name',
password='QVe1EH1Z5MshOW2GHS4b',
)
workflow = Workflow.objects.create(
workflow_name='workflow_artifact_default_name',
workflow_created_by_user_id=test_user,
workflow_modified_by_user_id=test_user,
)
WorkflowDefaultArtifactAttributes.objects.create(
workflow=workflow,
artifacttype=artifacttype_1,
artifact_default_name='artifact_default_name_1',
artifact_default_priority=artfactpriority_1,
artifact_default_status=artfactstatus_1,
)
''' test model methods '''
def test_workflow_default_artifactname_string(self):
"""model test"""
# get object
wda = WorkflowDefaultArtifactAttributes.objects.get(
artifact_default_name='artifact_default_name_1'
)
# compare
self.assertEqual(str(wda), 'artifact_default_name_1')
def test_workflow_default_artifactname_length(self):
"""model test"""
# get object
wda = WorkflowDefaultArtifactAttributes.objects.get(
artifact_default_name='artifact_default_name_1'
)
# get max length
max_length = wda._meta.get_field('artifact_default_name').max_length
# compare
self.assertEqual(max_length, 50)
''' test model labels '''
def helper_workflow_default_artifactname_attribute_label(
self, field, expected_label
):
"""helper function"""
# get object
wda = WorkflowDefaultArtifactAttributes.objects.get(
artifact_default_name='artifact_default_name_1'
)
# get label
field_label = wda._meta.get_field(field).verbose_name
# compare
self.assertEqual(field_label, expected_label)
def test_workflow_default_artifactname_id_attribute_label(self):
"""label test"""
self.helper_workflow_default_artifactname_attribute_label(
'workflow_default_artifactname_id', 'workflow default artifactname id'
)
def test_workflow_default_artifactname_artifacttype_attribute_label(self):
"""label test"""
self.helper_workflow_default_artifactname_attribute_label(
'artifacttype', 'artifacttype'
)
def test_workflow_default_artifactname_workflow_attribute_label(self):
"""label test"""
self.helper_workflow_default_artifactname_attribute_label(
'workflow', 'workflow'
)
def test_workflow_default_artifactname_artifact_default_name_attribute_label(self):
"""label test"""
self.helper_workflow_default_artifactname_attribute_label(
'artifact_default_name', 'artifact default name'
)
def test_workflow_default_artifactname_artifact_default_priority_attribute_label(
self,
):
"""label test"""
self.helper_workflow_default_artifactname_attribute_label(
'artifact_default_priority', 'artifact default priority'
)
def test_workflow_default_artifactname_artifact_default_status_attribute_label(
self,
):
"""label test"""
self.helper_workflow_default_artifactname_attribute_label(
'artifact_default_status', 'artifact default status'
)
|
the-stack_106_15705
|
import io
import re
import typing as t
import discord
import requests
from discord.ext.commands import BadArgument, Cog, Context, command
from discord.utils import get
from bot import config
from bot.core.bot import Bot
class InvalidCommandException(Exception):
pass
class EmoteNotFoundException(Exception):
pass
class Emote:
content_re = re.compile(r"^\b(twitch|bttv|ffz)\b\s([\w\d]+)(?:\s(.+))?$", re.I | re.M)
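# content_re groups: (1) emote source (twitch/bttv/ffz), (2) emote id, (3) optional channel name (bttv only)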
def __init__(self, emote_type: str, emote_id: str, emote_channel: t.Optional[str]) -> None:
self.emote_type = emote_type
self.emote_id = emote_id
self.emote_channel = emote_channel
self.name = self.get_name()
self.image = self.get_image()
def get_name(self) -> str:
if self.emote_type == "twitch":
api_url = "https://api.twitchemotes.com/api/v4/emotes"
api_res = requests.get(api_url, params={"id": self.emote_id}).json()
return api_res[0]["code"]
elif self.emote_type == "frf":
api_url = f"https://api.frankerfacez.com/v1/emote/{self.emote_id}"
api_res = requests.get(api_url).json()
return api_res["emote"]["name"]
elif self.emote_type == "btv":
if self.emote_channel == "global":
api_url = "https://api.betterttv.net/2/emotes"
else:
api_url = f"https://api.betterttv.net/2/channels/{self.emote_channel}"
api_res = requests.get(api_url).json()
for emote in api_res["emotes"]:
if emote["id"] == self.emote_id:
return emote["code"]
def get_image(self) -> io.BytesIO:
img = None
if self.emote_type == "twitch":
img = requests.get(f"https://static-cdn.jtvnw.net/emoticons/v1/{self.emote_id}/3.0").content
elif self.emote_type == "bttv":
img = requests.get(f"https://cdn.betterttv.net/emote/{self.emote_id}/3x").content
elif self.emote_type == "ffz":
img = requests.get(f"https://cdn.frankerfacez.com/emoticon/{self.emote_id}/4").content
return io.BytesIO(img)
@classmethod
def get_emote(cls, content) -> "Emote":
content_match = re.match(Emote.content_re, content)
if not content_match:
raise BadArgument()
emote_type = content_match[1].lower()
emote_id = content_match[2].strip().lower()
emote_channel = None
if emote_type == "bttv":
emote_channel = content_match[3]
if not emote_channel:
raise BadArgument()
emote_channel = emote_channel.lower()
try:
return cls(emote_type, emote_id, emote_channel)
except (KeyError, IndexError):
raise EmoteNotFoundException()
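# Usage sketch (IDs are hypothetical): Emote.get_emote("twitch 120232") or
# Emote.get_emote("bttv 5771aa498bbc1e572cb7ae4d forsen") returns an Emote carrying its name and image bytes.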
class Emotes(Cog):
def __init__(self, bot: Bot) -> None:
self.bot = bot
# TODO: Remove this when error handler will be implemented
async def send_error(self, ctx: Context, error: str) -> None:
"""Sends the Error of Any functions as an Embed."""
help_message = f"Type `{config.COMMAND_PREFIX}help` for further assistance"
embed = discord.Embed(colour=discord.Colour.red())
embed.add_field(name=f"Error: {error}", value=help_message)
await ctx.send(embed=embed)
@command()
async def add_emote(self, ctx: Context, *, content: str) -> None:
"""
Add an emote to server
Usage:
- add_emote twitch <emote_id>
- add_emote bttv <emote_id> <channel_name>
- add_emote ffz <emote_id>
To get an emote visit:
- https://twitchemotes.com
- https://betterttv.com/emotes/shared
- https://www.frankerfacez.com/emoticons/
and find an emote you like!.
The channel name for BetterTTV emotes is found in the top right section of the web page for the emote.
The ID of the emote is found at the end of the URL for a specific emote.
- twitchemotes.com/emotes/120232
- betterttv.com/emotes/5771aa498bbc1e572cb7ae4d
- frankerfacez.com/emoticon/261802-4Town
"""
try:
emote = Emote.get_emote(content)
except BadArgument:
await self.send_error(ctx, "Invalid argument")
return
except EmoteNotFoundException:
await self.send_error(ctx, "Emote not found")
return
await ctx.guild.create_custom_emoji(name=emote.name, image=emote.image.read())
discord_emote = get(ctx.guild.emojis, name=emote.name)
emote_string = f"<:{discord_emote.name}:{discord_emote.id}>"
if discord_emote.animated:
emote_string = f"<a:{discord_emote.name}:{discord_emote.id}>"
await ctx.send(emote_string)
@command()
async def emote(self, ctx: Context, *, content: str) -> None:
"""
Send an emote.
Supply emote names as a comma-separated list to send multiple emotes in a single message
"""
names = content.split(",")
emote_string = ""
for name in names:
emote = get(ctx.guild.emojis, name=name)
if not emote:
await self.send_error(ctx, f"Emote {name} not found")
return
if emote.animated:
emote_string += f"<a:{emote.name}:{emote.id}>"
else:
emote_string += f"<:{emote.name}:{emote.id}>"
await ctx.send(emote_string)
await ctx.message.delete()
def setup(bot: Bot) -> None:
bot.add_cog(Emotes(bot))
|
the-stack_106_15706
|
import os
import os.path
import gzip
import hashlib
import datetime
import uuid
import copy
from collections import defaultdict
from blitzdb.backends.file.serializers import PickleSerializer as Serializer
"""
"""
class Store(object):
"""
This class stores binary data in files.
"""
def __init__(self, properties):
self._properties = properties
if 'path' not in properties:
raise AttributeError("You must specify a path when creating a Store!")
if not os.path.exists(properties['path']):
os.makedirs(properties['path'])
def _get_path_for_key(self, key):
return os.path.join(self._properties['path'], key)
def store_blob(self, blob, key):
with open(self._get_path_for_key(key), "wb") as output_file:
output_file.write(blob)
return key
def delete_blob(self, key):
filepath = self._get_path_for_key(key)
if os.path.exists(filepath):
os.unlink(filepath)
def get_blob(self, key):
try:
with open(self._get_path_for_key(key), "rb") as input_file:
return input_file.read()
except IOError:
raise KeyError("Key {0} not found!".format(key))
def has_blob(self, key):
if os.path.exists(self._get_path_for_key(key)):
return True
return False
def begin(self):
pass
def rollback(self):
pass
def commit(self):
pass
class TransactionalStore(Store):
"""
This class adds transaction support to the Store class.
"""
def __init__(self, properties):
super(TransactionalStore, self).__init__(properties)
self._enabled = True
self.begin()
def begin(self):
self._delete_cache = set()
self._update_cache = {}
def commit(self):
try:
self._enabled = False
for store_key in self._delete_cache:
if super(TransactionalStore, self).has_blob(store_key):
super(TransactionalStore, self).delete_blob(store_key)
for store_key, blob in self._update_cache.items():
super(TransactionalStore, self).store_blob(blob, store_key)
finally:
self._enabled = True
def has_blob(self, key):
if not self._enabled:
return super(TransactionalStore, self).has_blob(key)
if key in self._delete_cache:
return False
if key in self._update_cache:
return True
return super(TransactionalStore, self).has_blob(key)
def get_blob(self, key):
if not self._enabled:
return super(TransactionalStore, self).get_blob(key)
if key in self._update_cache:
return self._update_cache[key]
return super(TransactionalStore, self).get_blob(key)
def store_blob(self, blob, key, *args, **kwargs):
if not self._enabled:
return super(TransactionalStore, self).store_blob(blob, key, *args, **kwargs)
if key in self._delete_cache:
self._delete_cache.remove(key)
self._update_cache[key] = copy.copy(blob)
return key
def delete_blob(self, key, *args, **kwargs):
if not self._enabled:
return super(TransactionalStore, self).delete_blob(key, *args, **kwargs)
if not self.has_blob(key):
raise KeyError("Key %s not found!" % key)
self._delete_cache.add(key)
if key in self._update_cache:
del self._update_cache[key]
def rollback(self):
self._delete_cache = set()
self._update_cache = {}
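# Minimal usage sketch (the path is hypothetical):
# store = TransactionalStore({'path': '/tmp/blitzdb-blobs'})
# store.store_blob(b'raw bytes', 'my-key')
# store.commit()
# assert store.get_blob('my-key') == b'raw bytes'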
|
the-stack_106_15708
|
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
data=np.genfromtxt(path,delimiter=",",skip_header=1)
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
census=np.concatenate([data,new_record])
#Code starts here
# --------------
#Code starts here
x=census[0:1001,0:1]
age=x.flatten()
max_age=age.max()
min_age=age.min()
age_mean=age.mean()
age_std=age.std()
# print(max_age,min_age,age_mean,age_std)
# --------------
#Code starts here
ar=census[:,2]
race_0=census[census[:,2]==0]
race_1=census[census[:,2]==1]
race_2=census[census[:,2]==2]
race_3=census[census[:,2]==3]
race_4=census[census[:,2]==4]
len_0=len(race_0)
len_1=len(race_1)
len_2=len(race_2)
len_3=len(race_3)
len_4=len(race_4)
li=[len_0,len_1,len_2,len_3,len_4]
k=min(li)
minority_race=li.index(k)
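# minority_race is the race code (0-4) with the fewest records in the census data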
# --------------
#Code starts here
senior_citizens=census[census[:,0]>60]
working_hours_sum=int(senior_citizens[:,6].sum())
senior_citizens_len=len(senior_citizens)
avg_working_hours=working_hours_sum/senior_citizens_len
print(avg_working_hours)
# --------------
#Code starts here
high=census[census[:,1]>10]
low=census[census[:,1]<=10]
avg_pay_high=high[:,7].mean()
avg_pay_low=low[:,7].mean()
|
the-stack_106_15709
|
import numpy as np
from scipy.ndimage import affine_transform
# Functions to convert points to homogeneous coordinates and back
pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))])
unpad = lambda x: x[:,:-1]
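# pad appends a column of ones so 2-D points can be multiplied by an affine matrix in homogeneous
# coordinates; unpad drops that column again.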
def plot_matches(ax, image1, image2, keypoints1, keypoints2, matches,
keypoints_color='k', matches_color=None, only_matches=False):
"""Plot matched features.
Parameters
----------
ax : matplotlib.axes.Axes
Matches and image are drawn in this ax.
image1 : (N, M [, 3]) array
First grayscale or color image.
image2 : (N, M [, 3]) array
Second grayscale or color image.
keypoints1 : (K1, 2) array
First keypoint coordinates as ``(row, col)``.
keypoints2 : (K2, 2) array
Second keypoint coordinates as ``(row, col)``.
matches : (Q, 2) array
Indices of corresponding matches in first and second set of
descriptors, where ``matches[:, 0]`` denote the indices in the first
and ``matches[:, 1]`` the indices in the second set of descriptors.
keypoints_color : matplotlib color, optional
Color for keypoint locations.
matches_color : matplotlib color, optional
Color for lines which connect keypoint matches. By default the
color is chosen randomly.
only_matches : bool, optional
Whether to only plot matches and not plot the keypoint locations.
"""
image1 = image1.astype(np.float32)
image2 = image2.astype(np.float32)
new_shape1 = list(image1.shape)
new_shape2 = list(image2.shape)
print(new_shape1, new_shape2)
if image1.shape[0] < image2.shape[0]:
new_shape1[0] = image2.shape[0]
elif image1.shape[0] > image2.shape[0]:
new_shape2[0] = image1.shape[0]
if image1.shape[1] < image2.shape[1]:
new_shape1[1] = image2.shape[1]
elif image1.shape[1] > image2.shape[1]:
new_shape2[1] = image1.shape[1]
if new_shape1 != image1.shape:
new_image1 = np.zeros(new_shape1, dtype=image1.dtype)
new_image1[:image1.shape[0], :image1.shape[1]] = image1
image1 = new_image1
if new_shape2 != image2.shape:
new_image2 = np.zeros(new_shape2, dtype=image2.dtype)
new_image2[:image2.shape[0], :image2.shape[1]] = image2
image2 = new_image2
image = np.concatenate([image1, image2], axis=1)
offset = image1.shape
if not only_matches:
ax.scatter(keypoints1[:, 1], keypoints1[:, 0],
facecolors='none', edgecolors=keypoints_color)
ax.scatter(keypoints2[:, 1] + offset[1], keypoints2[:, 0],
facecolors='none', edgecolors=keypoints_color)
ax.imshow(image, interpolation='nearest', cmap='gray')
ax.axis((0, 2 * offset[1], offset[0], 0))
for i in range(matches.shape[0]):
idx1 = matches[i, 0]
idx2 = matches[i, 1]
if matches_color is None:
color = np.random.rand(3)
else:
color = matches_color
ax.plot((keypoints1[idx1, 1], keypoints2[idx2, 1] + offset[1]),
(keypoints1[idx1, 0], keypoints2[idx2, 0]),
'-', color=color)
def get_output_space(img_ref, imgs, transforms):
"""
Args:
img_ref: reference image
imgs: images to be transformed
transforms: list of affine transformation matrices. transforms[i] maps
points in imgs[i] to the points in img_ref
Returns:
output_shape
"""
assert (len(imgs) == len(transforms))
r, c = img_ref.shape
corners = np.array([[0, 0], [r, 0], [0, c], [r, c]])
all_corners = [corners]
for i in range(len(imgs)):
r, c = imgs[i].shape
H = transforms[i]
corners = np.array([[0, 0], [r, 0], [0, c], [r, c]])
warped_corners = corners.dot(H[:2,:2]) + H[2,:2]
all_corners.append(warped_corners)
# Find the extents of both the reference image and the warped
# target image
all_corners = np.vstack(all_corners)
# The overall output shape will be max - min
corner_min = np.min(all_corners, axis=0)
corner_max = np.max(all_corners, axis=0)
output_shape = (corner_max - corner_min)
# Ensure integer shape with np.ceil and dtype conversion
output_shape = np.ceil(output_shape).astype(int)
offset = corner_min
return output_shape, offset
def warp_image(img, H, output_shape, offset):
# Note about affine_transfomr function:
# Given an output image pixel index vector o,
# the pixel value is determined from the input image at position
# np.dot(matrix,o) + offset.
Hinv = np.linalg.inv(H)
m = Hinv.T[:2,:2]
b = Hinv.T[:2,2]
img_warped = affine_transform(img.astype(np.float32),
m, b+offset,
output_shape,
cval=-1)
return img_warped
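# Typical stitching flow (names are illustrative; H maps points of img2 into img1's frame):
# output_shape, offset = get_output_space(img1, [img2], [H])
# img1_warped = warp_image(img1, np.eye(3), output_shape, offset)
# img2_warped = warp_image(img2, H, output_shape, offset)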
|
the-stack_106_15710
|
#!/usr/bin/python
import os
import os.path
import runtime
import sys
from os.path import join as path_join
from options import *
from os_utils import *
product_values = ['desktop', 'desktop-win32', 'android', 'wasm']
profiles_table = {
'desktop': ['net_4_x'],
'desktop-win32': ['net_4_x'],
'android': ['monodroid', 'monodroid_tools'],
'wasm': ['wasm', 'wasm_tools']
}
test_profiles_table = {
'desktop': [],
'desktop-win32': [],
'android': ['monodroid', 'monodroid_tools'],
'wasm': ['wasm']
}
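# profiles_table maps each product to the BCL profiles that must be built;
# test_profiles_table lists the profiles whose test suites can be run for that product.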
def configure_bcl(opts: BclOpts):
stamp_file = path_join(opts.configure_dir, '.stamp-bcl-configure')
if os.path.isfile(stamp_file):
return
if not os.path.isfile(path_join(opts.mono_source_root, 'configure')):
runtime.run_autogen(opts)
build_dir = path_join(opts.configure_dir, 'bcl')
mkdir_p(build_dir)
CONFIGURE_FLAGS = [
'--disable-boehm',
'--disable-btls-lib',
'--disable-nls',
'--disable-support-build',
'--with-mcs-docs=no'
]
configure = path_join(opts.mono_source_root, 'configure')
configure_args = CONFIGURE_FLAGS
run_command(configure, args=configure_args, cwd=build_dir, name='configure bcl')
touch(stamp_file)
def make_bcl(opts: BclOpts):
stamp_file = path_join(opts.configure_dir, '.stamp-bcl-make')
if os.path.isfile(stamp_file):
return
build_dir = path_join(opts.configure_dir, 'bcl')
make_args = ['-C', build_dir, '-C', 'mono']
make_args += ['V=1'] if opts.verbose_make else []
run_command('make', args=make_args, name='make bcl')
touch(stamp_file)
def build_bcl(opts: BclOpts):
configure_bcl(opts)
make_bcl(opts)
def clean_bcl(opts: BclOpts):
configure_stamp_file = path_join(opts.configure_dir, '.stamp-bcl-configure')
make_stamp_file = path_join(opts.configure_dir, '.stamp-bcl-make')
build_dir = path_join(opts.configure_dir, 'bcl')
rm_rf(configure_stamp_file, make_stamp_file, build_dir)
def make_product(opts: BclOpts, product: str):
build_bcl(opts)
build_dir = path_join(opts.configure_dir, 'bcl')
profiles = profiles_table[product]
test_profiles = test_profiles_table[product]
install_dir = path_join(opts.install_dir, '%s-bcl' % product)
mkdir_p(install_dir)
for profile in profiles:
mkdir_p('%s/%s' % (install_dir, profile))
make_args = ['-C', build_dir, '-C', 'runtime', 'all-mcs', 'build_profiles=%s' % ' '.join(profiles)]
make_args += ['V=1'] if opts.verbose_make else []
if product == 'desktop-win32':
make_args += ['PROFILE_PLATFORM=win32'] # Requires patch: 'bcl-profile-platform-override.diff'
run_command('make', args=make_args, name='make profiles')
if opts.tests and len(test_profiles) > 0:
test_make_args = ['-C', build_dir, '-C', 'runtime', 'test', 'xunit-test', 'test_profiles=%s' % ' '.join(test_profiles)]
test_make_args += ['V=1'] if opts.verbose_make else []
run_command('make', args=test_make_args, name='make tests')
# Copy the bcl profiles to the output directory
from distutils.dir_util import copy_tree
for profile in profiles:
profile_dir = profile + '-win32' if product == 'desktop-win32' else profile
copy_tree('%s/mcs/class/lib/%s' % (opts.mono_source_root, profile_dir), '%s/%s' % (install_dir, profile_dir))
# Remove unneeded files
import glob
file_patterns = []
file_patterns += ['.*'] # Recursively remove hidden files we shouldn't have copied (e.g.: .stamp)
file_patterns += ['*.dll.so', '*.exe.so'] # Remove pre-built AOT modules. We don't need them and they take a lot of space.
file_patterns += ['*.pdb'] if opts.remove_pdb else []
for profile in profiles:
for file_pattern in file_patterns:
file_pattern_recursive = '%s/**/%s' % (install_dir, file_pattern)
[rm_rf(x) for x in glob.iglob(file_pattern_recursive, recursive=True)]
# godot_android_ext profile (custom 'Mono.Android.dll')
if product == 'android':
this_script_dir = os.path.dirname(os.path.realpath(__file__))
monodroid_profile_dir = '%s/%s' % (install_dir, 'monodroid')
godot_profile_dir = '%s/%s' % (install_dir, 'godot_android_ext')
refs = ['mscorlib.dll', 'System.Core.dll', 'System.dll']
mkdir_p(godot_profile_dir)
android_env_csc_args = [
path_join(this_script_dir, 'files', 'godot-AndroidEnvironment.cs'),
'-target:library', '-out:%s' % path_join(godot_profile_dir, 'Mono.Android.dll'),
'-nostdlib', '-noconfig', '-langversion:latest'
]
android_env_csc_args += ['-r:%s' % path_join(monodroid_profile_dir, r) for r in refs]
run_command('csc', android_env_csc_args)
def clean_product(opts: BclOpts, product: str):
clean_bcl(opts)
install_dir = path_join(opts.install_dir, '%s-bcl' % product)
rm_rf(install_dir)
def main(raw_args):
import cmd_utils
from cmd_utils import custom_bool
actions = {
'make': make_product,
'clean': clean_product
}
parser = cmd_utils.build_arg_parser(description='Builds the Mono BCL')
default_help = 'default: %(default)s'
parser.add_argument('action', choices=actions.keys())
parser.add_argument('--product', choices=product_values, action='append', required=True)
parser.add_argument('--tests', action='store_true', default=False, help=default_help)
parser.add_argument('--remove-pdb', type=custom_bool, default=True, help=default_help)
cmd_utils.add_base_arguments(parser, default_help)
args = parser.parse_args(raw_args)
opts = bcl_opts_from_args(args)
products = args.product
try:
for product in products:
action = actions[args.action]
action(opts, product)
except BuildError as e:
sys.exit(e.message)
if __name__ == '__main__':
from sys import argv
main(argv[1:])
|
the-stack_106_15712
|
import torch
from models import Actor, Critic
#from models_dueling import Actor, Critic
import torch.nn as nn
import torch.nn.functional as F
# Building the whole Training Process into a class
class TD31v1(object):
def __init__(self, state_dim, action_dim, max_action, args):
self.actor = Actor(state_dim, action_dim, max_action).to(args.device)
self.actor_target = Actor(state_dim, action_dim, max_action).to(args.device)
self.actor_target.load_state_dict(self.actor.state_dict())
self.actor_optimizer = torch.optim.Adam(self.actor.parameters())
self.critic = Critic(state_dim, action_dim).to(args.device)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters())
self.list_target_critic = []
for c in range(args.num_q_target):
critic_target = Critic(state_dim, action_dim).to(args.device)
critic_target.load_state_dict(critic_target.state_dict())
self.list_target_critic.append(critic_target)
self.target_critic = Critic(state_dim, action_dim).to(args.device)
self.target_critic.load_state_dict(self.critic.state_dict())
self.max_action = max_action
self.num_q_target = args.num_q_target
self.update_counter = 0
self.step = 0
self.currentQNet = 0
self.batch_size = args.batch_size
self.discount = args.discount
self.tau = args.tau
self.policy_noise = args.policy_noise
self.noise_clip = args.noise_clip
self.policy_freq = args.policy_freq
self.device = args.device
print("Use ", self.device)
def select_action(self, state):
state = torch.Tensor(state.reshape(1, -1)).to(self.device)
return self.actor(state).cpu().data.numpy().flatten()
def train(self, replay_buffer, writer, iterations):
self.step += 1
for it in range(iterations):
# Step 4: We sample a batch of transitions (s, s’, a, r) from the memory
batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = replay_buffer.sample(self.batch_size)
state = torch.Tensor(batch_states).to(self.device)
next_state = torch.Tensor(batch_next_states).to(self.device)
action = torch.Tensor(batch_actions).to(self.device)
reward = torch.Tensor(batch_rewards).to(self.device)
done = torch.Tensor(batch_dones).to(self.device)
# Step 5: From the next state s’, the Actor target plays the next action a’
next_action = self.actor_target(next_state)
# Step 6: We add Gaussian noise to this next action a’ and we clamp it in a range of values supported by the environment
noise = torch.Tensor(batch_actions).data.normal_(0, self.policy_noise).to(self.device)
noise = noise.clamp(-self.noise_clip, self.noise_clip)
next_action = (next_action + noise).clamp(-self.max_action, self.max_action)
# Step 7: The two Critic targets take each the couple (s’, a’) as input and return two Q-values Qt1(s’,a’) and Qt2(s’,a’) as outputs
target_Q = 0
for critic in self.list_target_critic:
target_Q1, target_Q2 = critic(next_state, next_action)
target_Q += torch.min(target_Q1, target_Q2)
target_Q *= 1./ self.num_q_target
# Step 9: We get the final target of the two Critic models, which is: Qt = r + γ * min(Qt1, Qt2), where γ is the discount factor
target_Q = reward + ((1 - done) * self.discount * target_Q).detach()
# Step 10: The two Critic models take each the couple (s, a) as input and return two Q-values Q1(s,a) and Q2(s,a) as outputs
current_Q1, current_Q2 = self.critic(state, action)
# Step 11: We compute the loss coming from the two Critic models: Critic Loss = MSE_Loss(Q1(s,a), Qt) + MSE_Loss(Q2(s,a), Qt)
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
# Step 12: We backpropagate this Critic loss and update the parameters of the two Critic models with a SGD optimizer
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# Step 13: Once every two iterations, we update our Actor model by performing gradient ascent on the output of the first Critic model
if it % self.policy_freq == 0:
# print("cuurent", self.currentQNet)
actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
# Step 15: Still once every two iterations, we update the weights of the Critic target by polyak averaging
for param, target_param in zip(self.critic.parameters(), self.target_critic.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
def hardupdate(self):
self.update_counter +=1
self.currentQNet = self.update_counter % self.num_q_target
# Step 15: Still once every two iterations, we update the weights of the Critic target by polyak averaging
for param, target_param in zip(self.target_critic.parameters(), self.list_target_critic[self.currentQNet].parameters()):
target_param.data.copy_(param.data)
# Making a save method to save a trained model
def save(self, filename, directory):
torch.save(self.actor.state_dict(), '%s/%s_actor.pth' % (directory, filename))
torch.save(self.critic.state_dict(), '%s/%s_critic.pth' % (directory, filename))
# Making a load method to load a pre-trained model
def load(self, filename, directory):
self.actor.load_state_dict(torch.load('%s/%s_actor.pth' % (directory, filename)))
self.critic.load_state_dict(torch.load('%s/%s_critic.pth' % (directory, filename)))
|
the-stack_106_15713
|
# Copyright 2016 Infinite Connection
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import forms
from django.contrib.auth import authenticate
from django.utils.translation import ugettext_lazy as _
from .models import *
class NewFileForm(forms.ModelForm):
class Meta:
model = File
fields = ['title', 'tags']
tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())
class ChapterForm(forms.ModelForm):
class Meta:
model = Chapter
fields = ['title', 'position', 'content']
content = forms.CharField(label=_("Chapter content"), widget=forms.Textarea)
|
the-stack_106_15714
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from collections import defaultdict, OrderedDict
import logging
import os
import re
import torch
import traceback
from torch.serialization import default_restore_location
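# torch_persistent_save retries torch.save up to three times and only logs the traceback
# if the final attempt also fails.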
def torch_persistent_save(*args, **kwargs):
for i in range(3):
try:
return torch.save(*args, **kwargs)
except Exception:
if i == 2:
logging.error(traceback.format_exc())
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor):
if isinstance(state_dict, dict):
cpu_dict = OrderedDict()
for k, v in state_dict.items():
cpu_dict[k] = convert_state_dict_type(v)
return cpu_dict
elif isinstance(state_dict, list):
return [convert_state_dict_type(v) for v in state_dict]
elif torch.is_tensor(state_dict):
return state_dict.type(ttype)
else:
return state_dict
def save_state(filename, args, model, criterion, optimizer, lr_scheduler,
num_updates, optim_history=None, extra_state=None):
if optim_history is None:
optim_history = []
if extra_state is None:
extra_state = {}
state_dict = {
'args': args,
'model': model.state_dict() if model else {},
'optimizer_history': optim_history + [
{
'criterion_name': criterion.__class__.__name__,
'optimizer_name': optimizer.__class__.__name__,
'lr_scheduler_state': lr_scheduler.state_dict(),
'num_updates': num_updates,
}
],
'last_optimizer_state': convert_state_dict_type(optimizer.state_dict()),
'extra_state': extra_state,
}
torch_persistent_save(state_dict, filename)
def load_model_state(filename, model):
if not os.path.exists(filename):
return None, [], None
state = torch.load(filename, map_location=lambda s, l: default_restore_location(s, 'cpu'))
state = _upgrade_state_dict(state)
model.upgrade_state_dict(state['model'])
# load model parameters
try:
model.load_state_dict(state['model'], strict=True)
except Exception:
raise Exception('Cannot load model parameters from checkpoint, '
'please ensure that the architectures match')
return state['extra_state'], state['optimizer_history'], state['last_optimizer_state']
def _upgrade_state_dict(state):
"""Helper for upgrading old model checkpoints."""
# add optimizer_history
if 'optimizer_history' not in state:
state['optimizer_history'] = [
{
'criterion_name': 'CrossEntropyCriterion',
'best_loss': state['best_loss'],
},
]
state['last_optimizer_state'] = state['optimizer']
del state['optimizer']
del state['best_loss']
# move extra_state into sub-dictionary
if 'epoch' in state and 'extra_state' not in state:
state['extra_state'] = {
'epoch': state['epoch'],
'batch_offset': state['batch_offset'],
'val_loss': state['val_loss'],
}
del state['epoch']
del state['batch_offset']
del state['val_loss']
# reduce optimizer history's memory usage (only keep the last state)
if 'optimizer' in state['optimizer_history'][-1]:
state['last_optimizer_state'] = state['optimizer_history'][-1]['optimizer']
for optim_hist in state['optimizer_history']:
del optim_hist['optimizer']
# record the optimizer class name
if 'optimizer_name' not in state['optimizer_history'][-1]:
state['optimizer_history'][-1]['optimizer_name'] = 'FairseqNAG'
# move best_loss into lr_scheduler_state
if 'lr_scheduler_state' not in state['optimizer_history'][-1]:
state['optimizer_history'][-1]['lr_scheduler_state'] = {
'best': state['optimizer_history'][-1]['best_loss'],
}
del state['optimizer_history'][-1]['best_loss']
# keep track of number of updates
if 'num_updates' not in state['optimizer_history'][-1]:
state['optimizer_history'][-1]['num_updates'] = 0
# old model checkpoints may not have separate source/target positions
if hasattr(state['args'], 'max_positions') and not hasattr(state['args'], 'max_source_positions'):
state['args'].max_source_positions = state['args'].max_positions
state['args'].max_target_positions = state['args'].max_positions
# use stateful training data iterator
if 'train_iterator' not in state['extra_state']:
state['extra_state']['train_iterator'] = {
'epoch': state['extra_state']['epoch'],
'iterations_in_epoch': state['extra_state'].get('batch_offset', 0),
}
return state
def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
"""Load an ensemble of models for inference.
model_arg_overrides allows you to pass a dictionary model_arg_overrides --
{'arg_name': arg} -- to override model args that were used during model
training
"""
# load model architectures and weights
states = []
for filename in filenames:
if not os.path.exists(filename):
raise IOError('Model file not found: {}'.format(filename))
state = torch.load(filename, map_location=lambda s, l: default_restore_location(s, 'cpu'))
state = _upgrade_state_dict(state)
states.append(state)
ensemble = []
for state in states:
args = state['args']
if model_arg_overrides is not None:
args = _override_model_args(args, model_arg_overrides)
# build model for ensemble
model = task.build_model(args)
model.upgrade_state_dict(state['model'])
model.load_state_dict(state['model'], strict=True)
ensemble.append(model)
return ensemble, args
def _override_model_args(args, model_arg_overrides):
# Uses model_arg_overrides {'arg_name': arg} to override model args
for arg_name, arg_val in model_arg_overrides.items():
setattr(args, arg_name, arg_val)
return args
def move_to_cuda(sample):
if len(sample) == 0:
return {}
def _move_to_cuda(maybe_tensor):
if torch.is_tensor(maybe_tensor):
return maybe_tensor.cuda()
elif isinstance(maybe_tensor, dict):
return {
key: _move_to_cuda(value)
for key, value in maybe_tensor.items()
}
elif isinstance(maybe_tensor, list):
return [_move_to_cuda(x) for x in maybe_tensor]
else:
return maybe_tensor
return _move_to_cuda(sample)
INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)
def _get_full_incremental_state_key(module_instance, key):
module_name = module_instance.__class__.__name__
# assign a unique ID to each module instance, so that incremental state is
# not shared across module instances
if not hasattr(module_instance, '_fairseq_instance_id'):
INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1
module_instance._fairseq_instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name]
return '{}.{}.{}'.format(module_name, module_instance._fairseq_instance_id, key)
def get_incremental_state(module, incremental_state, key):
"""Helper for getting incremental state for an nn.Module."""
full_key = _get_full_incremental_state_key(module, key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(module, incremental_state, key, value):
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = _get_full_incremental_state_key(module, key)
incremental_state[full_key] = value
def load_align_dict(replace_unk):
if replace_unk is None:
align_dict = None
elif isinstance(replace_unk, str):
# Load alignment dictionary for unknown word replacement if it was passed as an argument.
align_dict = {}
with open(replace_unk, 'r') as f:
for line in f:
cols = line.split()
align_dict[cols[0]] = cols[1]
else:
# No alignment dictionary provided but we still want to perform unknown word replacement by copying the
# original source word.
align_dict = {}
return align_dict
def print_embed_overlap(embed_dict, vocab_dict):
embed_keys = set(embed_dict.keys())
vocab_keys = set(vocab_dict.symbols)
overlap = len(embed_keys & vocab_keys)
print("| Found {}/{} types in embedding file.".format(overlap, len(vocab_dict)))
def parse_embedding(embed_path):
"""Parse embedding text file into a dictionary of word and embedding tensors.
The first line can have vocabulary size and dimension. The following lines
should contain word and embedding separated by spaces.
Example:
2 5
the -0.0230 -0.0264 0.0287 0.0171 0.1403
at -0.0395 -0.1286 0.0275 0.0254 -0.0932
"""
embed_dict = {}
with open(embed_path) as f_embed:
next(f_embed) # skip header
for line in f_embed:
pieces = line.rstrip().split(" ")
embed_dict[pieces[0]] = torch.Tensor([float(weight) for weight in pieces[1:]])
return embed_dict
def load_embedding(embed_dict, vocab, embedding):
for idx in range(len(vocab)):
token = vocab[idx]
if token in embed_dict:
embedding.weight.data[idx] = embed_dict[token]
return embedding
def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
from fairseq import tokenizer
# Tokens are strings here
hypo_tokens = tokenizer.tokenize_line(hypo_str)
# TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully
src_tokens = tokenizer.tokenize_line(src_str) + ['<eos>']
for i, ht in enumerate(hypo_tokens):
if ht == unk:
src_token = src_tokens[alignment[i]]
# Either take the corresponding value in the aligned dictionary or just copy the original value.
hypo_tokens[i] = align_dict.get(src_token, src_token)
return ' '.join(hypo_tokens)
def post_process_prediction(hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe):
from fairseq import tokenizer
hypo_str = tgt_dict.string(hypo_tokens, remove_bpe)
if align_dict is not None:
hypo_str = replace_unk(hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string())
if align_dict is not None or remove_bpe is not None:
# Convert back to tokens for evaluating with unk replacement or without BPE
# Note that the dictionary can be modified inside the method.
hypo_tokens = tokenizer.Tokenizer.tokenize(hypo_str, tgt_dict, add_if_not_exist=True)
return hypo_tokens, hypo_str, alignment
def make_positions(tensor, padding_idx, left_pad, onnx_trace=False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1.
Padding symbols are ignored, but it is necessary to specify whether padding
is added on the left side (left_pad=True) or right side (left_pad=False).
"""
if onnx_trace:
range_buf = torch._dim_arange(like=tensor, dim=1) + padding_idx + 1
mask = tensor.ne(padding_idx)
positions = range_buf.expand_as(tensor)
if left_pad:
positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
return positions * mask.long() + padding_idx * (1 - mask.long())
max_pos = padding_idx + 1 + tensor.size(1)
if not hasattr(make_positions, 'range_buf'):
make_positions.range_buf = tensor.new()
make_positions.range_buf = make_positions.range_buf.type_as(tensor)
if make_positions.range_buf.numel() < max_pos:
torch.arange(padding_idx + 1, max_pos, out=make_positions.range_buf)
mask = tensor.ne(padding_idx)
positions = make_positions.range_buf[:tensor.size(1)].expand_as(tensor)
if left_pad:
positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
return tensor.clone().masked_scatter_(mask, positions[mask])
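# Illustrative sketch, not part of the original file: expected behaviour of
# make_positions on a left-padded batch, assuming padding_idx=1 as in fairseq
# dictionaries.
def _example_make_positions():
    batch = torch.LongTensor([
        [1, 1, 5, 6, 7],   # two left pads
        [4, 5, 6, 7, 8],   # no padding
    ])
    positions = make_positions(batch, padding_idx=1, left_pad=True)
    # Pads keep padding_idx, real tokens count up from padding_idx + 1:
    # tensor([[1, 1, 2, 3, 4],
    #         [2, 3, 4, 5, 6]])
    return positions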
def strip_pad(tensor, pad):
return tensor[tensor.ne(pad)]
def buffered_arange(max):
if not hasattr(buffered_arange, 'buf'):
buffered_arange.buf = torch.LongTensor()
if max > buffered_arange.buf.numel():
torch.arange(max, out=buffered_arange.buf)
return buffered_arange.buf[:max]
def convert_padding_direction(src_tokens, padding_idx, right_to_left=False, left_to_right=False):
assert right_to_left ^ left_to_right
pad_mask = src_tokens.eq(padding_idx)
if not pad_mask.any():
# no padding, return early
return src_tokens
if left_to_right and not pad_mask[:, 0].any():
# already right padded
return src_tokens
if right_to_left and not pad_mask[:, -1].any():
# already left padded
return src_tokens
max_len = src_tokens.size(1)
range = buffered_arange(max_len).type_as(src_tokens).expand_as(src_tokens)
num_pads = pad_mask.long().sum(dim=1, keepdim=True)
if right_to_left:
index = torch.remainder(range - num_pads, max_len)
else:
index = torch.remainder(range + num_pads, max_len)
return src_tokens.gather(1, index)
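# Illustrative sketch, not part of the original file: moving padding from the
# left to the right of a batch, assuming padding_idx=1.
def _example_convert_padding_direction():
    left_padded = torch.LongTensor([[1, 1, 5, 6, 7]])
    right_padded = convert_padding_direction(left_padded, padding_idx=1, left_to_right=True)
    # Expected: tensor([[5, 6, 7, 1, 1]])
    return right_padded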
def item(tensor):
if hasattr(tensor, 'item'):
return tensor.item()
if hasattr(tensor, '__getitem__'):
return tensor[0]
return tensor
def clip_grad_norm_(tensor, max_norm):
grad_norm = item(torch.norm(tensor))
if grad_norm > max_norm > 0:
clip_coef = max_norm / (grad_norm + 1e-6)
tensor.mul_(clip_coef)
return grad_norm
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float('-inf')).type_as(t)
def checkpoint_paths(path, pattern=r'checkpoint(\d+)\.pt'):
"""Retrieves all checkpoints found in `path` directory.
Checkpoints are identified by matching filename to the specified pattern. If
the pattern contains groups, the result will be sorted by the first group in
descending order.
"""
pt_regexp = re.compile(pattern)
files = os.listdir(path)
entries = []
for i, f in enumerate(files):
m = pt_regexp.fullmatch(f)
if m is not None:
idx = int(m.group(1)) if len(m.groups()) > 0 else i
entries.append((idx, m.group(0)))
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]
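# Illustrative sketch, not part of the original file: checkpoint_paths sorts
# matching files by their numeric group in descending order; the temporary
# directory below exists only for demonstration.
def _example_checkpoint_paths():
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        for name in ('checkpoint1.pt', 'checkpoint2.pt', 'checkpoint10.pt'):
            open(os.path.join(tmp, name), 'w').close()
        # -> [.../checkpoint10.pt, .../checkpoint2.pt, .../checkpoint1.pt]
        return checkpoint_paths(tmp)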
def resolve_max_positions(*args):
"""Resolve max position constraints from multiple sources."""
def nullsafe_min(l):
minim = None
for item in l:
if minim is None:
minim = item
elif item is not None and item < minim:
minim = item
return minim
max_positions = None
for arg in args:
if max_positions is None:
max_positions = arg
elif arg is not None:
if isinstance(arg, float) or isinstance(arg, int):
max_positions = min(max_positions, arg)
else:
max_positions = tuple(
map(nullsafe_min, zip(max_positions, arg))
)
return max_positions
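# Illustrative sketch, not part of the original file: resolving per-side
# max-position limits coming from a task and a model, where None means
# "no limit".
def _example_resolve_max_positions():
    task_limit = (1024, 1024)
    model_limit = (512, None)
    return resolve_max_positions(task_limit, model_limit)  # -> (512, 1024)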
|
the-stack_106_15715
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import warnings
from pymatgen.apps.borg.hive import (
GaussianToComputedEntryDrone,
SimpleVaspToComputedEntryDrone,
VaspToComputedEntryDrone,
)
from pymatgen.entries.computed_entries import ComputedStructureEntry
from pymatgen.util.testing import PymatgenTest
class VaspToComputedEntryDroneTest(unittest.TestCase):
def setUp(self):
self.drone = VaspToComputedEntryDrone(data=["efermi"])
self.structure_drone = VaspToComputedEntryDrone(True)
def test_get_valid_paths(self):
for path in os.walk(PymatgenTest.TEST_FILES_DIR):
if path[0] == PymatgenTest.TEST_FILES_DIR:
self.assertTrue(len(self.drone.get_valid_paths(path)) > 0)
def test_assimilate(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
entry = self.drone.assimilate(PymatgenTest.TEST_FILES_DIR)
for p in ["hubbards", "is_hubbard", "potcar_spec", "run_type"]:
self.assertIn(p, entry.parameters)
self.assertAlmostEqual(entry.data["efermi"], -6.62148548)
self.assertEqual(entry.composition.reduced_formula, "Xe")
self.assertAlmostEqual(entry.energy, 0.5559329)
entry = self.structure_drone.assimilate(PymatgenTest.TEST_FILES_DIR)
self.assertEqual(entry.composition.reduced_formula, "Xe")
self.assertAlmostEqual(entry.energy, 0.5559329)
self.assertIsInstance(entry, ComputedStructureEntry)
self.assertIsNotNone(entry.structure)
# self.assertEqual(len(entry.parameters["history"]), 2)
def tearDown(self):
warnings.simplefilter("default")
def test_to_from_dict(self):
d = self.structure_drone.as_dict()
drone = VaspToComputedEntryDrone.from_dict(d)
self.assertEqual(type(drone), VaspToComputedEntryDrone)
class SimpleVaspToComputedEntryDroneTest(unittest.TestCase):
def setUp(self):
self.drone = SimpleVaspToComputedEntryDrone()
self.structure_drone = SimpleVaspToComputedEntryDrone(True)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_get_valid_paths(self):
for path in os.walk(PymatgenTest.TEST_FILES_DIR):
if path[0] == PymatgenTest.TEST_FILES_DIR:
self.assertTrue(len(self.drone.get_valid_paths(path)) > 0)
def test_to_from_dict(self):
d = self.structure_drone.as_dict()
drone = SimpleVaspToComputedEntryDrone.from_dict(d)
self.assertEqual(type(drone), SimpleVaspToComputedEntryDrone)
class GaussianToComputedEntryDroneTest(unittest.TestCase):
def setUp(self):
self.drone = GaussianToComputedEntryDrone(data=["corrections"])
self.structure_drone = GaussianToComputedEntryDrone(True)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_get_valid_paths(self):
for path in os.walk(os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules")):
if path[0] == os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules"):
self.assertTrue(len(self.drone.get_valid_paths(path)) > 0)
def test_assimilate(self):
test_file = os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules", "methane.log")
entry = self.drone.assimilate(test_file)
for p in [
"functional",
"basis_set",
"charge",
"spin_multiplicity",
"route_parameters",
]:
self.assertIn(p, entry.parameters)
for p in ["corrections"]:
self.assertIn(p, entry.data)
self.assertEqual(entry.composition.reduced_formula, "H4C")
self.assertAlmostEqual(entry.energy, -39.9768775602)
entry = self.structure_drone.assimilate(test_file)
self.assertEqual(entry.composition.reduced_formula, "H4C")
self.assertAlmostEqual(entry.energy, -39.9768775602)
self.assertIsInstance(entry, ComputedStructureEntry)
self.assertIsNotNone(entry.structure)
for p in ["properly_terminated", "stationary_type"]:
self.assertIn(p, entry.data)
def test_to_from_dict(self):
d = self.structure_drone.as_dict()
drone = GaussianToComputedEntryDrone.from_dict(d)
self.assertEqual(type(drone), GaussianToComputedEntryDrone)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
the-stack_106_15720
|
"""distutils.core
The only module that needs to be imported to use the Distutils; provides
the 'setup' function (which is to be called from the setup script). Also
indirectly provides the Distribution and Command classes, although they are
really defined in distutils.dist and distutils.cmd.
"""
__revision__ = "$Id: core.py 63335 2008-05-16 00:03:33Z alexandre.vassalotti $"
import sys, os
from distutils.debug import DEBUG
from distutils.errors import *
from distutils.util import grok_environment_error
# Mainly import these so setup scripts can "from distutils.core import" them.
from distutils.dist import Distribution
from distutils.cmd import Command
from distutils.config import PyPIRCCommand
from distutils.extension import Extension
# This is a barebones help message generated displayed when the user
# runs the setup script with no arguments at all. More useful help
# is generated with various --help options: global help, list commands,
# and per-command help.
USAGE = """\
usage: %(script)s [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...]
or: %(script)s --help [cmd1 cmd2 ...]
or: %(script)s --help-commands
or: %(script)s cmd --help
"""
def gen_usage (script_name):
script = os.path.basename(script_name)
return USAGE % vars()
# Some mild magic to control the behaviour of 'setup()' from 'run_setup()'.
_setup_stop_after = None
_setup_distribution = None
# Legal keyword arguments for the setup() function
setup_keywords = ('distclass', 'script_name', 'script_args', 'options',
'name', 'version', 'author', 'author_email',
'maintainer', 'maintainer_email', 'url', 'license',
'description', 'long_description', 'keywords',
'platforms', 'classifiers', 'download_url',
'requires', 'provides', 'obsoletes',
)
# Legal keyword arguments for the Extension constructor
extension_keywords = ('name', 'sources', 'include_dirs',
'define_macros', 'undef_macros',
'library_dirs', 'libraries', 'runtime_library_dirs',
'extra_objects', 'extra_compile_args', 'extra_link_args',
'swig_opts', 'export_symbols', 'depends', 'language')
def setup (**attrs):
"""The gateway to the Distutils: do everything your setup script needs
to do, in a highly flexible and user-driven way. Briefly: create a
Distribution instance; find and parse config files; parse the command
line; run each Distutils command found there, customized by the options
supplied to 'setup()' (as keyword arguments), in config files, and on
the command line.
The Distribution instance might be an instance of a class supplied via
the 'distclass' keyword argument to 'setup'; if no such class is
supplied, then the Distribution class (in dist.py) is instantiated.
All other arguments to 'setup' (except for 'cmdclass') are used to set
attributes of the Distribution instance.
The 'cmdclass' argument, if supplied, is a dictionary mapping command
names to command classes. Each command encountered on the command line
will be turned into a command class, which is in turn instantiated; any
class found in 'cmdclass' is used in place of the default, which is
(for command 'foo_bar') class 'foo_bar' in module
'distutils.command.foo_bar'. The command class must provide a
'user_options' attribute which is a list of option specifiers for
'distutils.fancy_getopt'. Any command-line options between the current
and the next command are used to set attributes of the current command
object.
When the entire command-line has been successfully parsed, calls the
'run()' method on each command object in turn. This method will be
driven entirely by the Distribution object (which each command object
has a reference to, thanks to its constructor), and the
command-specific options that became attributes of each command
object.
"""
global _setup_stop_after, _setup_distribution
# Determine the distribution class -- either caller-supplied or
# our Distribution (see below).
klass = attrs.get('distclass')
if klass:
del attrs['distclass']
else:
klass = Distribution
if 'script_name' not in attrs:
attrs['script_name'] = os.path.basename(sys.argv[0])
if 'script_args' not in attrs:
attrs['script_args'] = sys.argv[1:]
# Create the Distribution instance, using the remaining arguments
# (ie. everything except distclass) to initialize it
try:
_setup_distribution = dist = klass(attrs)
except DistutilsSetupError as msg:
if 'name' not in attrs:
raise SystemExit("error in setup command: %s" % msg)
else:
raise SystemExit("error in %s setup command: %s" % \
(attrs['name'], msg))
if _setup_stop_after == "init":
return dist
# Find and parse the config file(s): they will override options from
# the setup script, but be overridden by the command line.
dist.parse_config_files()
if DEBUG:
print("options (after parsing config files):")
dist.dump_option_dicts()
if _setup_stop_after == "config":
return dist
# Parse the command line; any command-line errors are the end user's
# fault, so turn them into SystemExit to suppress tracebacks.
try:
ok = dist.parse_command_line()
except DistutilsArgError as msg:
raise SystemExit(gen_usage(dist.script_name) + "\nerror: %s" % msg)
if DEBUG:
print("options (after parsing command line):")
dist.dump_option_dicts()
if _setup_stop_after == "commandline":
return dist
# And finally, run all the commands found on the command line.
if ok:
try:
dist.run_commands()
except KeyboardInterrupt:
raise SystemExit("interrupted")
except (IOError, os.error) as exc:
error = grok_environment_error(exc)
if DEBUG:
sys.stderr.write(error + "\n")
raise
else:
raise SystemExit(error)
except (DistutilsError,
CCompilerError) as msg:
if DEBUG:
raise
else:
raise SystemExit("error: " + str(msg))
return dist
# setup ()
def run_setup (script_name, script_args=None, stop_after="run"):
"""Run a setup script in a somewhat controlled environment, and
return the Distribution instance that drives things. This is useful
if you need to find out the distribution meta-data (passed as
keyword args from 'script' to 'setup()', or the contents of the
config files or command-line.
'script_name' is a file that will be read and run with 'exec()';
'sys.argv[0]' will be replaced with 'script' for the duration of the
call. 'script_args' is a list of strings; if supplied,
'sys.argv[1:]' will be replaced by 'script_args' for the duration of
the call.
'stop_after' tells 'setup()' when to stop processing; possible
values:
init
stop after the Distribution instance has been created and
populated with the keyword arguments to 'setup()'
config
stop after config files have been parsed (and their data
stored in the Distribution instance)
commandline
stop after the command-line ('sys.argv[1:]' or 'script_args')
have been parsed (and the data stored in the Distribution)
run [default]
stop after all commands have been run (the same as if 'setup()'
        had been called in the usual way)
Returns the Distribution instance, which provides all information
used to drive the Distutils.
"""
if stop_after not in ('init', 'config', 'commandline', 'run'):
raise ValueError("invalid value for 'stop_after': %r" % (stop_after,))
global _setup_stop_after, _setup_distribution
_setup_stop_after = stop_after
save_argv = sys.argv
g = {'__file__': script_name}
l = {}
try:
try:
sys.argv[0] = script_name
if script_args is not None:
sys.argv[1:] = script_args
exec(open(script_name).read(), g, l)
finally:
sys.argv = save_argv
_setup_stop_after = None
except SystemExit:
# Hmm, should we do something if exiting with a non-zero code
# (ie. error)?
pass
except:
raise
if _setup_distribution is None:
raise RuntimeError(("'distutils.core.setup()' was never called -- "
"perhaps '%s' is not a Distutils setup script?") % \
script_name)
# I wonder if the setup script's namespace -- g and l -- would be of
# any interest to callers?
#print "_setup_distribution:", _setup_distribution
return _setup_distribution
# run_setup ()
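# Illustrative sketch, not part of the original module: reading a setup
# script's metadata without running any build commands; 'setup.py' is a
# placeholder path.
def _example_run_setup():
    dist = run_setup('setup.py', stop_after='init')
    return dist.get_name(), dist.get_version()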
|
the-stack_106_15722
|
# Copyright (c) 2015 Rackspace, Inc.
# Copyright (c) 2015 Hewlett Packard Enterprise
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
r"""
==============
HTML formatter
==============
This formatter outputs the issues as HTML.
:Example:
.. code-block:: html
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>
Bandit Report
</title>
<style>
html * {
font-family: "Arial", sans-serif;
}
pre {
font-family: "Monaco", monospace;
}
.bordered-box {
border: 1px solid black;
padding-top:.5em;
padding-bottom:.5em;
padding-left:1em;
}
.metrics-box {
font-size: 1.1em;
line-height: 130%;
}
.metrics-title {
font-size: 1.5em;
font-weight: 500;
margin-bottom: .25em;
}
.issue-description {
font-size: 1.3em;
font-weight: 500;
}
.candidate-issues {
margin-left: 2em;
border-left: solid 1px; LightGray;
padding-left: 5%;
margin-top: .2em;
margin-bottom: .2em;
}
.issue-block {
border: 1px solid LightGray;
padding-left: .5em;
padding-top: .5em;
padding-bottom: .5em;
margin-bottom: .5em;
}
.issue-sev-high {
background-color: Pink;
}
.issue-sev-medium {
background-color: NavajoWhite;
}
.issue-sev-low {
background-color: LightCyan;
}
</style>
</head>
<body>
<div id="metrics">
<div class="metrics-box bordered-box">
<div class="metrics-title">
Metrics:<br>
</div>
Total lines of code: <span id="loc">9</span><br>
Total lines skipped (#nosec): <span id="nosec">0</span>
</div>
</div>
<br>
<div id="results">
<div id="issue-0">
<div class="issue-block issue-sev-medium">
<b>yaml_load: </b> Use of unsafe yaml load. Allows
instantiation of arbitrary objects. Consider yaml.safe_load().<br>
<b>Test ID:</b> B506<br>
<b>Severity: </b>MEDIUM<br>
<b>Confidence: </b>HIGH<br>
<b>File: </b><a href="examples/yaml_load.py"
target="_blank">examples/yaml_load.py</a> <br>
<b>More info: </b><a href="https://docs.openstack.org/developer/bandit/
plugins/yaml_load.html" target="_blank">
https://docs.openstack.org/developer/bandit/plugins/yaml_load.html</a>
<br>
<div class="code">
<pre>
5 ystr = yaml.dump({'a' : 1, 'b' : 2, 'c' : 3})
6 y = yaml.load(ystr)
7 yaml.dump(y)
</pre>
</div>
</div>
</div>
</div>
</body>
</html>
.. versionadded:: 0.14.0
"""
import cgi
import logging
import sys
from bandit.core import docs_utils
from bandit.core import test_properties
from bandit.formatters import utils
LOG = logging.getLogger(__name__)
@test_properties.accepts_baseline
def report(manager, fileobj, sev_level, conf_level, lines=-1):
"""Writes issues to 'fileobj' in HTML format
:param manager: the bandit manager object
:param fileobj: The output file object, which may be sys.stdout
:param sev_level: Filtering severity level
:param conf_level: Filtering confidence level
:param lines: Number of lines to report, -1 for all
"""
header_block = u"""
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>
Bandit Report
</title>
<style>
html * {
font-family: "Arial", sans-serif;
}
pre {
font-family: "Monaco", monospace;
}
.bordered-box {
border: 1px solid black;
padding-top:.5em;
padding-bottom:.5em;
padding-left:1em;
}
.metrics-box {
font-size: 1.1em;
line-height: 130%;
}
.metrics-title {
font-size: 1.5em;
font-weight: 500;
margin-bottom: .25em;
}
.issue-description {
font-size: 1.3em;
font-weight: 500;
}
.candidate-issues {
margin-left: 2em;
border-left: solid 1px; LightGray;
padding-left: 5%;
margin-top: .2em;
margin-bottom: .2em;
}
.issue-block {
border: 1px solid LightGray;
padding-left: .5em;
padding-top: .5em;
padding-bottom: .5em;
margin-bottom: .5em;
}
.issue-sev-high {
background-color: Pink;
}
.issue-sev-medium {
background-color: NavajoWhite;
}
.issue-sev-low {
background-color: LightCyan;
}
</style>
</head>
"""
report_block = u"""
<body>
{metrics}
{skipped}
<br>
<div id="results">
{results}
</div>
</body>
</html>
"""
issue_block = u"""
<div id="issue-{issue_no}">
<div class="issue-block {issue_class}">
<b>{test_name}: </b> {test_text}<br>
<b>Test ID:</b> {test_id}<br>
<b>Severity: </b>{severity}<br>
<b>Confidence: </b>{confidence}<br>
<b>File: </b><a href="{path}" target="_blank">{path}</a> <br>
<b>More info: </b><a href="{url}" target="_blank">{url}</a><br>
{code}
{candidates}
</div>
</div>
"""
code_block = u"""
<div class="code">
<pre>
{code}
</pre>
</div>
"""
candidate_block = u"""
<div class="candidates">
<br>
<b>Candidates: </b>
{candidate_list}
</div>
"""
candidate_issue = u"""
<div class="candidate">
<div class="candidate-issues">
<pre>{code}</pre>
</div>
</div>
"""
skipped_block = u"""
<br>
<div id="skipped">
<div class="bordered-box">
<b>Skipped files:</b><br><br>
{files_list}
</div>
</div>
"""
metrics_block = u"""
<div id="metrics">
<div class="metrics-box bordered-box">
<div class="metrics-title">
Metrics:<br>
</div>
Total lines of code: <span id="loc">{loc}</span><br>
Total lines skipped (#nosec): <span id="nosec">{nosec}</span>
</div>
</div>
"""
issues = manager.get_issue_list(sev_level=sev_level, conf_level=conf_level)
baseline = not isinstance(issues, list)
# build the skipped string to insert in the report
skipped_str = ''.join('%s <b>reason:</b> %s<br>' % (fname, reason)
for fname, reason in manager.get_skipped())
if skipped_str:
skipped_text = skipped_block.format(files_list=skipped_str)
else:
skipped_text = ''
# build the results string to insert in the report
results_str = ''
for index, issue in enumerate(issues):
if not baseline or len(issues[issue]) == 1:
candidates = ''
safe_code = cgi.escape(issue.get_code(lines, True).
strip('\n').lstrip(' '))
code = code_block.format(code=safe_code)
else:
candidates_str = ''
code = ''
for candidate in issues[issue]:
candidate_code = cgi.escape(candidate.get_code(lines, True).
strip('\n').lstrip(' '))
candidates_str += candidate_issue.format(code=candidate_code)
candidates = candidate_block.format(candidate_list=candidates_str)
url = docs_utils.get_url(issue.test_id)
results_str += issue_block.format(issue_no=index,
issue_class='issue-sev-{}'.
format(issue.severity.lower()),
test_name=issue.test,
test_id=issue.test_id,
test_text=issue.text,
severity=issue.severity,
confidence=issue.confidence,
path=issue.fname, code=code,
candidates=candidates,
url=url)
# build the metrics string to insert in the report
metrics_summary = metrics_block.format(
loc=manager.metrics.data['_totals']['loc'],
nosec=manager.metrics.data['_totals']['nosec'])
# build the report and output it
report_contents = report_block.format(metrics=metrics_summary,
skipped=skipped_text,
results=results_str)
with fileobj:
wrapped_file = utils.wrap_file_object(fileobj)
wrapped_file.write(utils.convert_file_contents(header_block))
wrapped_file.write(utils.convert_file_contents(report_contents))
if fileobj.name != sys.stdout.name:
LOG.info("HTML output written to file: %s", fileobj.name)
|
the-stack_106_15723
|
import textwrap
def construct_invoice_json(invoice):
NS = 'http://www.w3.org/2001/XMLSchema-instance'
data = {
'_name': 'IzdaniRacunEnostavni',
'_attrs': [("{%s}noNamespaceSchemaLocation" % NS, 'http://www.gzs.si/e-poslovanje/sheme/eSLOG_1-6_EnostavniRacun.xsd')],
'_ns': {
'ds': 'http://www.w3.org/2000/09/xmldsig#',
'xsd': 'http://uri.etsi.org/01903/v1.1.1#',
'xsi': NS
},
'invoice': {
'_name': 'Racun',
'_attrs': [('Id', 'data')],
'header': construct_header_data(invoice),
'date_issued': construct_date_data(invoice.date_issued_code, invoice.date_issued),
'date_of_service': construct_date_data(invoice.date_of_service_code, invoice.date_of_service),
'currency': construct_currency_data(invoice.currency),
'location': construct_location_data(invoice.location_code, invoice.location_address),
'issuer': construct_company_data(invoice.issuer, 'II'),
'buyer': construct_company_data(invoice.recipient, 'BY'),
'recipient': construct_company_data(invoice.recipient, 'IV'),
'payment_terms': construct_payment_terms_data(invoice.date_due_code, invoice.date_due),
'reference_data': construct_reference_data(invoice.total_with_tax, invoice.payment_reference),
} # end invoice
}
for i, reference_document in enumerate(invoice.reference_documents):
data['invoice'][f"reference_document_{i}"] = construct_reference_document_data(reference_document)
if invoice.global_discount_amount:
data['invoice']['global_discount'] = construct_global_discount_data(invoice.global_discount_amount, invoice.global_discount_percentage)
if invoice.intro_text:
data['invoice']['intro_text'] = construct_custom_text_data('AAI', 'GLAVA_TEKST', invoice.intro_text)
for i, item in enumerate(invoice.document_items):
data['invoice'][f"item_{i}"] = construct_item_data(item)
for i, ts in enumerate(invoice.tax_summaries):
data['invoice'][f"tax_summary_{i}"] = construct_tax_summary_data(ts)
# add final sums to invoice
# Total without discount
data['invoice']['sums_without_discounts'] = construct_sums_data(amount=invoice.total_without_discount, sum_type='79')
# Discounts amount
data['invoice']['sums_discounts'] = construct_sums_data(amount=invoice.total_without_discount - invoice.total_without_tax, sum_type='53')
# Tax base sums
data['invoice']['sums_tax_base_amount'] = construct_sums_data(amount=invoice.total_without_tax, sum_type='125')
# Taxes amount
data['invoice']['sums_taxes'] = construct_sums_data(amount=invoice.total_with_tax - invoice.total_without_tax, sum_type='176')
# Total amount - with taxes
data['invoice']['sums_total_amount'] = construct_sums_data(amount=invoice.total_with_tax, sum_type='86')
if invoice.outro_text:
data['invoice']['outro_text'] = construct_custom_text_data('AAI', 'DODATNI_TEKST', invoice.outro_text)
return data
def construct_header_data(invoice):
header = {
'_name': 'GlavaRacuna',
'invoice_type': {
'_name': 'VrstaRacuna',
'_value': invoice.invoice_type,
},
'invoice_number': {
'_name': 'StevilkaRacuna',
'_value': invoice.invoice_number,
},
'invoice_function': {
'_name': 'FunkcijaRacuna',
'_value': invoice.invoice_function,
},
'payment_type': {
'_name': 'NacinPlacila',
'_value': invoice.payment_type,
},
'payment_purpose': {
'_name': 'KodaNamena',
'_value': invoice.payment_purpose
}
}
return header
def construct_date_data(date_code, date):
date = {
'_name': 'DatumiRacuna',
'date_type': {
'_name': 'VrstaDatuma',
'_value': date_code
},
'date': {
'_name': 'DatumRacuna',
'_value': date.isoformat()
}
}
return date
def construct_currency_data(currency):
currency = {
'_name': 'Valuta',
'currency_type': {
'_name': 'VrstaValuteRacuna',
'_value': '2',
},
'currency_code': {
'_name': 'KodaValute',
'_value': currency,
},
}
return currency
def construct_location_data(location_code, location_address):
location = {
'_name': 'Lokacije',
'location_code': {
'_name': 'VrstaLokacije',
'_value': location_code,
},
'location_name': {
'_name': 'NazivLokacije',
'_value': location_address,
},
}
return location
def construct_company_data(business, business_type='II'):
data = {
'_name': 'PodatkiPodjetja',
'info': {
'_name': 'NazivNaslovPodjetja',
'type': {
'_name': 'VrstaPartnerja',
'_value': business_type
},
'name': {
'_name': 'NazivPartnerja',
},
'address': {
'_name': 'Ulica',
},
'city': {
'_name': 'Kraj',
'_value': business.city
},
'country': {
'_name': 'NazivDrzave',
'_value': business.country
},
'zip_code': {
'_name': 'PostnaStevilka',
'_value': str(business.zip_code)
},
'country_code': {
'_name': 'KodaDrzave',
'_value': business.country_iso_code
},
}
}
# Add business name
business_name_split = textwrap.wrap(business.name, 35, break_long_words=True)
for i, bn_part in enumerate(business_name_split):
i = i + 1 # Start from 1
data['info']['name'][f"part_{i}"] = {
'_name': f"NazivPartnerja{i}",
'_value': bn_part
}
if i == 4:
break # Stop at max length
    # Add business address
addr_split = textwrap.wrap(business.address, 35, break_long_words=True)
for i, addr_part in enumerate(addr_split):
i = i + 1 # Start from 1
data['info']['address'][f"part_{i}"] = {
'_name': f"Ulica{i}",
'_value': addr_part
}
if i == 4:
break # Stop at max length
if business.iban:
data['financial_info'] = {
'_name': 'FinancniPodatkiPodjetja',
'bank_account_info': {
'_name': 'BancniRacun',
'iban': {
'_name': 'StevilkaBancnegaRacuna',
'_value': business.iban
},
'bic': {
'_name': 'BIC',
'_value': business.bic
}
}
}
if business.vat_id:
data['vat_id'] = {
'_name': 'ReferencniPodatkiPodjetja',
'type': {
'_name': 'VrstaPodatkaPodjetja',
'_value': 'VA'
},
'vat': {
'_name': 'PodatekPodjetja',
'_value': str(business.vat_id)
}
}
if business.registration_number:
data['registration_number'] = {
'_name': 'ReferencniPodatkiPodjetja',
'type': {
'_name': 'VrstaPodatkaPodjetja',
'_value': 'GN'
},
'vat': {
'_name': 'PodatekPodjetja',
'_value': str(business.registration_number)
}
}
return data
def construct_payment_terms_data(date_due_code, date_due):
payment_terms = {
'_name': 'PlacilniPogoji',
'term_data': {
'_name': 'PodatkiORokih',
'term_code': {
'_name': 'VrstaPogoja',
'_value': '3',
}
},
'term_due': {
'_name': 'PlacilniRoki',
'term_due_type': {
'_name': 'VrstaDatumaPlacilnegaRoka',
'_value': date_due_code,
},
'term_due_date': {
'_name': 'Datum',
'_value': date_due.isoformat(),
},
}
}
return payment_terms
def construct_reference_data(total_with_tax, payment_reference):
reference = {
'_name': 'PovzetekZneskovRacuna',
'invoice_amounts': {
'_name': 'ZneskiRacuna',
'type': {
'_name': 'VrstaZneska',
'_value': '9' # Amount to be paid
},
'amount': {
'_name': 'ZnesekRacuna',
'_value': str(total_with_tax)
}
},
'reference': {
'_name': 'SklicZaPlacilo',
'ref_type': {
'_name': 'SklicPlacila',
'_value': 'PQ'
},
'ref_number': {
'_name': 'StevilkaSklica',
'_value': payment_reference
}
}
}
return reference
def construct_reference_document_data(reference_document):
reference_doc_data = {
'_name': 'ReferencniDokumenti',
'_attrs': [('VrstaDokumenta', reference_document.type_code)],
'document_number': {
'_name': "StevilkaDokumenta",
'_value': reference_document.document_number
}
}
return reference_doc_data
def construct_global_discount_data(discount_amount, discount_percentage):
discount_data = {
'_name': 'GlobalniPopusti',
'description': {
'_name': 'OpisPopusta',
'_value': 'SKUPNI POPUST',
},
'type': {
'_name': 'TipPopusta',
'_value': 'PP',
},
'percentage': {
'_name': 'OdstotekPopusta',
'_value': str(discount_percentage)
},
'amount': {
'_name': 'ZnesekPopusta',
'_value': str(discount_amount)
},
}
return discount_data
def construct_custom_text_data(text_format, text_type, text):
"""
Text must be split into 70 chars long strings.
:param text_format: AAI or other predefined formats
:param text_type:
:param text:
:return:
"""
text_split = textwrap.wrap(text, 70, break_long_words=True)
custom_text = {
'_name': 'PoljubnoBesedilo',
'format': {
'_name': 'VrstaBesedila',
'_value': text_format,
},
'content': {
'_name': 'Besedilo',
'text_1': {
'_name': 'Tekst1',
'_value': text_type,
},
}
}
for i, txt in enumerate(text_split):
# Since text_1 is used for text_type we must enumerate from 2 onwards
i = i + 2
custom_text['content'][f"text_{i}"] = {
'_name': f"Tekst{i}",
'_value': txt
}
# Stop the loop if index is 5 - we can't place any more than that in XML.
if i == 5:
break
return custom_text
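# Illustrative sketch, not part of the original module: the 70-character
# wrapping above fills at most Tekst2..Tekst5, so anything past four chunks is
# silently dropped.
def _example_custom_text_data():
    block = construct_custom_text_data('AAI', 'GLAVA_TEKST', 'x' * 200)
    return sorted(key for key in block['content'] if key.startswith('text_'))
    # -> ['text_1', 'text_2', 'text_3', 'text_4']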
def construct_item_data(item):
data = {
'_name': 'PostavkeRacuna',
'info': {
'_name': 'Postavka',
'row_num': {
'_name': 'StevilkaVrstice',
'_value': str(item.row_number)
}
},
'description': {
'_name': 'OpisiArtiklov',
'code': {
'_name': 'KodaOpisaArtikla',
'_value': 'F',
},
'name': {
'_name': 'OpisArtikla',
'desc_1': {
'_name': 'OpisArtikla1',
'_value': item.item_name[:35] # Only 35 chars...
}
}
},
'quantity': {
'_name': 'KolicinaArtikla',
'qty_type': {
'_name': 'VrstaKolicine',
'_value': item.quantity_type,
},
'qty': {
'_name': 'Kolicina',
'_value': str(item.quantity)
},
'unit': {
'_name': 'EnotaMere',
'_value': item.unit
}
},
'value_before_discount': {
'_name': 'ZneskiPostavke',
'type': {
'_name': 'VrstaZneskaPostavke',
'_value': '203' # Total before discount
},
'amount': {
'_name': 'ZnesekPostavke',
'_value': "%.2f" % (item.price_without_tax * item.quantity)
}
},
'value_total': {
'_name': 'ZneskiPostavke',
'type': {
'_name': 'VrstaZneskaPostavke',
'_value': '38' # Total with discount
},
'amount': {
'_name': 'ZnesekPostavke',
'_value': str(item.total_with_tax)
}
},
'price': {
'_name': 'CenaPostavke',
'value': {
'_name': 'Cena',
'_value': str(item.price_without_tax)
}
},
'tax_info': {
'_name': 'DavkiPostavke',
'taxes': {
'_name': 'DavkiNaPostavki',
'type': {
'_name': 'VrstaDavkaPostavke',
'_value': 'VAT'
},
'vat_percentage': {
'_name': 'OdstotekDavkaPostavke',
'_value': str(item.tax_rate)
}
},
'tax_amounts_base': {
'_name': 'ZneskiDavkovPostavke',
'type': {
'_name': 'VrstaZneskaDavkaPostavke',
'_value': '125'
},
'amount': {
'_name': 'Znesek',
'_value': str(item.total_without_tax)
}
},
'tax_amounts_tax': {
'_name': 'ZneskiDavkovPostavke',
'type': {
'_name': 'VrstaZneskaDavkaPostavke',
'_value': '124'
},
'amount': {
'_name': 'Znesek',
'_value': str(item.total_with_tax - item.total_without_tax)
}
}
}
}
if item.discount_percentage:
data['discount'] = {
'_name': 'OdstotkiPostavk',
'identification': {
'_name': 'Identifikator',
'_value': 'A', # Discount
},
'type': {
'_name': 'VrstaOdstotkaPostavke',
'_value': '12', # Discount
},
'percentage': {
'_name': 'OdstotekPostavke',
'_value': str(item.discount_percentage)
},
'type_amount': {
'_name': 'VrstaZneskaOdstotka',
'_value': '204'
},
'amount': {
'_name': 'ZnesekOdstotka',
'_value': str(item.discount_amount)
}
}
return data
def construct_tax_summary_data(tax_summary):
data = {
'_name': 'PovzetekDavkovRacuna',
'summary': {
'_name': 'DavkiRacuna',
'tax_type': {
'_name': 'VrstaDavka',
'_value': 'VAT'
},
'tax_percentage': {
'_name': 'OdstotekDavka',
'_value': str(tax_summary.tax_rate)
},
},
'amount_base': {
'_name': 'ZneskiDavkov',
'type': {
'_name': 'VrstaZneskaDavka',
'_value': '125' # Osnova
},
'amount': {
'_name': 'ZnesekDavka',
'_value': str(tax_summary.tax_base)
}
},
'amount_tax': {
'_name': 'ZneskiDavkov',
'type': {
'_name': 'VrstaZneskaDavka',
'_value': '124' # Tax amount
},
'amount': {
'_name': 'ZnesekDavka',
'_value': str(tax_summary.tax_amount)
}
}
}
return data
def construct_sums_data(amount, sum_type, ref=None):
data = {
'_name': 'PovzetekZneskovRacuna',
'amounts': {
'_name': 'ZneskiRacuna',
'type': {
'_name': 'VrstaZneska',
'_value': str(sum_type)
},
'amount': {
'_name': 'ZnesekRacuna',
'_value': "%.2f" % amount
}
},
'ref': {
'_name': 'SklicZaPlacilo',
'ref_type': {
'_name': 'SklicPlacila',
'_value': 'PQ'
}
}
}
if ref is not None:
data['ref']['ref_number'] = {
'_name': 'StevilkaSklica',
'_value': ref
}
return data
|
the-stack_106_15724
|
#!/usr/bin/env python
"""
LDD Corral
Tool to corral all the LDDs
"""
import logging
import os
import sys
import traceback
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from shutil import rmtree
from github3 import login
from subprocess import Popen, CalledProcessError, PIPE, STDOUT
from pds_github_util.branches.git_actions import clone_checkout_branch
GIT_URL_BASE = '[email protected]:{}.git'
BASE_DIR = '/tmp'
BASE_REPO = 'pds-template-repo-java'
IGNORE_REPOS = ['PDS-Data-Dictionaries.github.io', 'PDS4-LDD-Issue-Repo']
GITHUB_ORG = 'NASA-PDS'
# Quiet github3 logging
logger = logging.getLogger('github3')
logger.setLevel(level=logging.WARNING)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def run_cmd(cmd):
    with Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True, universal_newlines=True) as p:
        for line in p.stdout:
            print(line, end='')  # stream subprocess output as it is produced
        returncode = p.wait()  # wait so returncode is actually populated
        if returncode != 0:
            raise CalledProcessError(returncode, p.args)
def cleanup_dir(path):
if os.path.isdir(path):
rmtree(path)
def clone_repo(path, git_url):
cleanup_dir(path)
# clone the repo
cmd = [f'git clone {git_url} {path}']
run_cmd(cmd)
def update_action(token, gh, args, base_repo):
"""Update Github Actions
Loop through all repos and update their github actions with the template
"""
for _repo in gh.repositories_by(args.github_org):
        if _repo.name != args.base_repo and _repo.name not in IGNORE_REPOS:  # base_repo is 'org/name'; compare the bare repo name
logger.info(f'updating {_repo.name}')
output_dir = os.path.join(BASE_DIR, _repo.name)
clone_repo(output_dir, _repo.ssh_url)
# update action
cmd = [f'./update_action.sh {os.path.join(BASE_DIR, args.base_repo)} {output_dir} {args.token}']
run_cmd(cmd)
def update_templates(token, gh, args, base_repo):
"""Update Github Issue Templates.
Loop through all repos and update their github issue templates with the template repo
"""
for _repo in gh.repositories_by(args.github_org):
        if _repo.name != args.base_repo and _repo.name not in IGNORE_REPOS:  # base_repo is 'org/name'; compare the bare repo name
logger.info(f'updating {_repo.name}')
output_dir = os.path.join(BASE_DIR, _repo.name)
clone_repo(output_dir, _repo.ssh_url)
# update action
cmd = [f'./update_templates.sh {os.path.join(BASE_DIR, args.base_repo)} {output_dir} {args.token}']
run_cmd(cmd)
def main():
"""main"""
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
description=__doc__)
parser.add_argument('--github_org',
help=('github org to update repos.'),
default=GITHUB_ORG)
parser.add_argument('--update_templates',
help='Update issue templates',
action='store_true', default=False)
parser.add_argument('--update_action',
help='Update repo actions',
action='store_true', default=False)
parser.add_argument('--base_repo',
help='base repo to copy config from',
default=BASE_REPO)
parser.add_argument('--token',
help='github token.')
args = parser.parse_args()
token = args.token or os.environ.get('GITHUB_TOKEN')
if not token:
logger.error(f'Github token must be provided or set as environment variable (GITHUB_TOKEN).')
sys.exit(1)
try:
# connect to github
gh = login(token=token)
base_repo = args.github_org + '/' + args.base_repo
logger.info(f'clone base repo {base_repo}')
output_dir = os.path.join(BASE_DIR, args.base_repo)
clone_repo(output_dir, GIT_URL_BASE.format(base_repo))
if args.update_action:
update_action(token, gh, args, base_repo)
if args.update_templates:
update_templates(token, gh, args, base_repo)
except Exception as e:
traceback.print_exc()
sys.exit(1)
logger.info(f'SUCCESS: Execution complete.')
if __name__ == '__main__':
main()
|
the-stack_106_15726
|
import os
from sqlalchemy import exc
from flask import Flask, jsonify, request, Blueprint
from project.api.models import Stocks
from project.api.utils import authenticate
from project import db
import datetime
from dateutil import parser
stocks_blueprint = Blueprint('stocks', __name__)
@stocks_blueprint.route('/stocks/', methods=['POST'])
@authenticate
def post_stocks(resp):
if not resp['data']:
response_object = {
'status': 'error',
'message': 'You do not have permission to do that.'
}
return jsonify(response_object), 401
# get post data
post_data = request.get_json()
response_object = {
'status': 'fail',
'message': 'Invalid payload.'
}
if not post_data:
return jsonify(response_object), 400
stock_name = post_data.get('stock_name')
opening_price = post_data.get('opening_price')
highest_price = post_data.get('highest_price')
lowest_price = post_data.get('lowest_price')
closing_price = post_data.get('closing_price')
date = post_data.get('date')
try:
stocks = Stocks(
stock_name=stock_name, opening_price=opening_price,
highest_price= highest_price, lowest_price=lowest_price,
closing_price=closing_price,
date=date
)
db.session.add(stocks)
db.session.commit()
response_object = {
'status': 'success',
'message': 'New stocks was recorded',
'stocks': stocks.to_json()
}
return jsonify(response_object), 201
except (exc.IntegrityError, exc.DataError, ValueError) as e:
db.session().rollback()
response_object = {
'status': 'fail',
'message': 'Invalid data type in the payload'
}
return jsonify(response_object), 400
@stocks_blueprint.route('/stocks/', methods=['GET'])
def get_stocks():
begin_date = request.args.get('begin_date')
end_date = request.args.get('end_date')
if (begin_date and end_date):
try:
parsed_begin = parser.parse(begin_date)
parsed_end = parser.parse(end_date)
        except (ValueError, OverflowError):
return jsonify({
'status': 'fail',
'message': 'Invalid date'
}), 400
stocks = Stocks.query.filter(Stocks.date.between(parsed_begin, parsed_end)).all()
else:
stocks = Stocks.query.all()
results = []
for stock in stocks:
results.append({
'stocks': stock.to_json()
})
return jsonify({
'status': 'success',
'data': results
}), 200
|
the-stack_106_15728
|
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from .torchvision import ImageTorchEncoder
import numpy as np
class CustomImageTorchEncoder(ImageTorchEncoder):
"""
    :class:`CustomImageTorchEncoder` encodes data from an ndarray, potentially `B x (Channel x Height x Width)`, into an
    ndarray of `B x D`.
Internally, :class:`CustomImageTorchEncoder` wraps any custom torch model not part of models from `torchvision.models`.
https://pytorch.org/docs/stable/torchvision/models.html
"""
def __init__(self, model_path: str, layer_name: str, *args, **kwargs):
"""
:param model_path: the path where the model is stored.
        :param layer_name: name of the layer from which to extract the feature map.
"""
super().__init__(*args, **kwargs)
self.model_path = model_path
self.layer_name = layer_name
def post_init(self):
import torch
if self.pool_strategy is not None:
self.pool_fn = getattr(np, self.pool_strategy)
self.model = torch.load(self.model_path)
self.model.eval()
self.to_device(self.model)
self.layer = getattr(self.model, self.layer_name)
def _get_features(self, data):
feature_map = None
def get_activation(model, input, output):
nonlocal feature_map
feature_map = output.detach()
handle = self.layer.register_forward_hook(get_activation)
self.model(data)
handle.remove()
return feature_map
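# Illustrative sketch, not part of the original encoder: the same forward-hook
# pattern used by _get_features, shown on a throwaway torch model so the
# captured feature map is easy to inspect.
def _example_forward_hook():
    import torch
    model = torch.nn.Sequential()
    model.add_module('fc', torch.nn.Linear(4, 2))
    captured = {}
    handle = model.fc.register_forward_hook(
        lambda module, inputs, output: captured.setdefault('fc', output.detach()))
    model(torch.zeros(1, 4))
    handle.remove()
    return captured['fc'].shape  # torch.Size([1, 2])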
|
the-stack_106_15729
|
import os
# from pystage.util import stderr_redirector
import sys
import io
import pygame
import pkg_resources
from svglib.svglib import svg2rlg
from reportlab.graphics import renderPM
import pystage
_round = lambda v: pygame.Vector2(round(v.x), round(v.y))
class CostumeManager():
ALL_AROUND = 1
LEFT_RIGHT = 2
NO_ROTATION = 3
def __init__(self, owner):
self.owner = owner
self.costumes = []
self.current_costume = -1
self.rotation_style = CostumeManager.ALL_AROUND
def add_costume(self, name, center_x=None, center_y=None, factor=1):
if isinstance(name, str):
costume = Costume(self, name, center_x, center_y, factor)
self.costumes.append(costume)
if self.current_costume==-1:
self.current_costume = len(self.costumes) - 1
self.update_sprite_image()
else:
for n in name:
self.add_costume(n)
def replace_costume(self, index, name, center_x=None, center_y=None, factor=1):
costume = Costume(self, name, center_x, center_y, factor)
del self.costumes[index]
self.costumes.insert(index, costume)
self.update_sprite_image()
def insert_costume(self, index, name, center_x=None, center_y=None, factor=1):
costume = Costume(self, name, center_x, center_y, factor)
self.costumes.insert(index, costume)
self.update_sprite_image()
def next_costume(self):
if self.current_costume == -1:
return
self.current_costume += 1
if self.current_costume == len(self.costumes):
self.current_costume = 0
self.update_sprite_image()
def switch_costume(self, name):
for i, costume in enumerate(self.costumes):
if costume.name.lower().strip() == name.lower().strip():
self.current_costume = i
self.update_sprite_image()
return
def update_sprite_image(self):
if isinstance(self.owner, pystage.core.CoreStage):
return
image, new_center = self.rotate_and_scale()
self.owner.image = image
self.owner.mask = pygame.mask.from_surface(image)
self.owner.rect = image.get_rect()
self.owner.rect.topleft = _round(self.owner._pos) - new_center
new_center = _round(new_center)
if self.owner.stage.show_sprite_boundaries:
image.blit(self.owner.mask.to_surface(), (0,0))
pygame.draw.rect(image, "red", image.get_rect(), 1)
pygame.draw.line(image, "red", new_center - (10, 0), new_center + (10, 0), 1)
pygame.draw.line(image, "red", new_center - (0, 10), new_center + (0, 10), 1)
def get_image(self):
if self.current_costume == -1:
return None
return self.costumes[self.current_costume].image
def get_costume(self):
if self.current_costume == -1:
return None
return self.costumes[self.current_costume]
def get_center(self):
if self.current_costume == -1:
return 0, 0
return pygame.Vector2(self.costumes[self.current_costume].center_x, self.costumes[self.current_costume].center_y)
def rotate_and_scale(self):
# Based on:
# https://stackoverflow.com/questions/54462645/how-to-rotate-an-image-around-its-center-while-its-scale-is-getting-largerin-py
# Rotation settings
flipped = False
if self.rotation_style == CostumeManager.ALL_AROUND:
angle = self.owner._direction
elif self.rotation_style == CostumeManager.NO_ROTATION:
angle = 0
else: # LEFT_RIGHT
angle = 0
flipped = True if 90 < self.owner._direction % 360 < 270 else False
# Zoom settings
scale = self.owner.size / 100
old_center = self.get_center()
old_center.y *= -1
center_rotated = old_center.rotate(angle)
# Corner points of the current rect
w, h = self.get_image().get_size()
box = [pygame.math.Vector2(p) for p in [(0, 0), (w, 0), (w, -h), (0, -h)]]
box_rotate = [p.rotate(angle) for p in box]
# Axis aligned bounding box
minx = min(box_rotate, key=lambda p: p[0])[0]
maxx = max(box_rotate, key=lambda p: p[0])[0]
miny = min(box_rotate, key=lambda p: p[1])[1]
maxy = max(box_rotate, key=lambda p: p[1])[1]
topleft = pygame.Vector2(minx, maxy)
# new center
new_center = center_rotated - topleft
new_center.y *= -1
new_center *= scale
# get a rotated image
rotozoom_image = pygame.transform.rotozoom(self.get_image(), angle, scale)
if flipped:
rotozoom_image = pygame.transform.flip(rotozoom_image, True, False)
return rotozoom_image, new_center
class Costume():
'''
This class handles and manages costumes and backdrops.
'''
def __init__(self, sprite, name, center_x=None, center_y=None, factor=1):
self.sprite = sprite
self.file = None
self.name = name
internal_folder = pkg_resources.resource_filename("pystage", "images/")
for folder in ["", "images/", "bilder/", internal_folder]:
for ext in ["", ".bmp", ".png", ".jpg", ".jpeg", ".gif", ".svg"]:
if os.path.exists(f"{folder}{name}{ext}"):
self.file = f"{folder}{name}{ext}"
break
if self.file is not None:
break
if self.file is None:
self.file = pkg_resources.resource_filename("pystage", "images/zombie_idle.png")
if self.file.endswith(".svg"):
print(f"Converting SVG file: {self.file}")
print("\nWARNING: SVG conversion is for convenience only")
print("and might not work as expected. It is recommended")
print("to manually convert to bitmap graphics (png or jpg).\n")
# Deactivated for now because of Windows problems. See issue #10
# with stderr_redirector(io.BytesIO()):
rlg = svg2rlg(self.file)
pil = renderPM.drawToPIL(rlg)
self.image = pygame.image.frombuffer(pil.tobytes(), pil.size, pil.mode)
else:
self.image = pygame.image.load(self.file)
if factor!=1:
self.image = pygame.transform.rotozoom(self.image, 0, 1.0/factor)
self.image = self.image.subsurface(self.image.get_bounding_rect())
# The offset resulting from the image crop
offset = pygame.Vector2(self.image.get_offset())
self.center_x = (float(self.image.get_parent().get_width()) / 2) - offset.x if center_x is None else (float(center_x) / factor) - offset.x
self.center_y = (float(self.image.get_parent().get_height()) / 2) - offset.y if center_y is None else (float(center_y) / factor) - offset.y
print(f"New costume: {name} -> {self.file}")
def __str__(self):
return f"{self.name} ({self.center_x}, {self.center_y})"
class SoundManager():
def __init__(self, owner):
self.owner = owner
self.sounds = {}
def add_sound(self, name):
if isinstance(name, str):
sound = Sound(self, name)
self.sounds[name]=sound
else:
for n in name:
self.add_sound(n)
def get_sound(self, name):
return self.sounds[name].sound
class Sound():
'''
This class handles and manages sounds.
'''
def __init__(self, sprite, name):
self.name = name
self.sprite = sprite
self.file = None
self.sound = None
internal_folder = pkg_resources.resource_filename("pystage", "sounds/")
for folder in ["", "sounds/", "klaenge/", internal_folder]:
for ext in ["", ".wav", ".ogg", ".mp3"]:
if os.path.exists(f"{folder}{name}{ext}"):
self.file = f"{folder}{name}{ext}"
break
if self.file is not None:
break
        if self.file is None:
            print(f"WARNING: Sound not found: {name}")
        elif self.file.endswith(".mp3"):
            print("WARNING: MP3 is not supported in pyStage. Use wav or ogg format.")
        else:
            self.sound = pygame.mixer.Sound(self.file)
def __str__(self):
return f"{self.name}"
|
the-stack_106_15730
|
# -*- coding: utf-8 -*-
import numpy as np
import cv2
def filter_f1997(src):
filter_map = cv2.imread('resources/images/1977map.png')
filter_map = cv2.cvtColor(filter_map,cv2.COLOR_BGR2RGB)
map_r = filter_map[:,:,0].copy().reshape((256,))
map_g = filter_map[:,:,1].copy().reshape((256,))
map_b = filter_map[:,:,2].copy().reshape((256,))
dst = np.zeros_like(src)
    dst[:,:,0] = map_r[src[:,:,0]]
    dst[:,:,1] = map_g[src[:,:,1]]
    dst[:,:,2] = map_b[src[:,:,2]]
return dst
|
the-stack_106_15731
|
from . import base
from . import mixins
from .. import cleaver
from datetime import date
import string
class TransformedRecord(
mixins.GenericCompensationMixin,
mixins.GenericDepartmentMixin, mixins.GenericIdentifierMixin,
mixins.GenericJobTitleMixin, mixins.GenericPersonMixin,
mixins.MembershipMixin, mixins.OrganizationMixin, mixins.PostMixin,
mixins.RaceMixin, mixins.LinkMixin, base.BaseTransformedRecord):
MAP = {
'full_name': 'Name',
'department': 'Department',
'job_title': 'Title',
'hire_date': 'Hire Date',
'compensation': 'Annual Salary Rate',
'gender': 'Gender',
'given_race': 'RaceEthinicity',
'employee_type': 'Fulltime or Parttime',
}
ORGANIZATION_NAME = 'University of Texas at Austin'
ORGANIZATION_CLASSIFICATION = 'University'
DATE_PROVIDED = date(2019, 9, 5)
URL = "http://raw.texastribune.org.s3.amazonaws.com/ut_austin/salaries/2019-07/employee_data_new.xlsx"
gender_map = {
'Female':'F',
'Male':'M',
'': 'Unknown'
}
description = 'Annual salary rate'
@property
def is_valid(self):
# Adjust to return False on invalid fields. For example:
return self.full_name.strip() != ''
@property
def person(self):
name = self.get_name()
r = {
'family_name': name.last,
'given_name': name.first,
'additional_name': name.middle,
'name': unicode(name),
'gender': self.gender_map[self.gender.strip()]
}
return r
@property
def job_title(self):
title = self.get_mapped_value('job_title').split(' - ')[1]
return title
@property
def race(self):
race = self.given_race.strip()
return {'name': race}
@property
def compensation_type(self):
employee_type = self.employee_type
if employee_type == 'Full-time':
return 'FT'
if employee_type == 'Part-time':
return 'PT'
def get_name(self):
return cleaver.EmployeeNameCleaver(
self.get_mapped_value('full_name')).parse()
transform = base.transform_factory(TransformedRecord)
|
the-stack_106_15732
|
# Based on https://github.com/princeton-vl/RAFT
import numpy as np
import random
import math
import cv2
from PIL import Image
import torch
import torchvision
import torch.nn.functional as F
from config import cfg
class FlowAugmentor:
def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5,spatial_aug_prob=0.8):
self.crop_size = crop_size
self.augcolor = torchvision.transforms.ColorJitter(
brightness=cfg.AUG_brightness,
contrast=cfg.AUG_contrast,
saturation=cfg.AUG_saturation,
hue=cfg.AUG_hue)
self.asymmetric_color_aug_prob = 0.2
self.spatial_aug_prob = cfg.SPATIAL_AUG_PROB
self.eraser_aug_prob = 0.5
self.min_scale = min_scale
self.max_scale = max_scale
self.max_stretch = 0.2
self.stretch_prob = 0.8
self.margin = 20
def color_transform(self, img1, img2):
if cfg.ASY_COLOR_AUG:
if np.random.rand() < self.asymmetric_color_aug_prob:
img1 = np.array(self.augcolor(Image.fromarray(img1)), dtype=np.uint8)
img2 = np.array(self.augcolor(Image.fromarray(img2)), dtype=np.uint8)
else:
image_stack = np.concatenate([img1, img2], axis=0)
image_stack = np.array(self.augcolor(Image.fromarray(image_stack)), dtype=np.uint8)
img1, img2 = np.split(image_stack, 2, axis=0)
else:
image_stack = np.concatenate([img1, img2], axis=0)
image_stack = np.array(self.augcolor(Image.fromarray(image_stack)), dtype=np.uint8)
img1, img2 = np.split(image_stack, 2, axis=0)
return img1, img2
def eraser_transform(self, img1, img2, bounds=[50, 100]):
ht, wd = img1.shape[:2]
if np.random.rand() < self.eraser_aug_prob:
mean_color = np.mean(img2.reshape(-1, 3), axis=0)
for _ in range(np.random.randint(1, 3)):
x0 = np.random.randint(0, wd)
y0 = np.random.randint(0, ht)
dx = np.random.randint(bounds[0], bounds[1])
dy = np.random.randint(bounds[0], bounds[1])
img2[y0:y0+dy, x0:x0+dx, :] = mean_color
return img1, img2
def spatial_transform(self, img1, img2, flow):
# randomly sample scale
ht, wd = img1.shape[:2]
min_scale = np.maximum(
(self.crop_size[0] + 1) / float(ht),
(self.crop_size[1] + 1) / float(wd))
max_scale = self.max_scale
min_scale = max(min_scale, self.min_scale)
scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
scale_x = scale
scale_y = scale
if np.random.rand() < self.stretch_prob:
scale_x *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
scale_y *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
scale_x = np.clip(scale_x, min_scale, None)
scale_y = np.clip(scale_y, min_scale, None)
if np.random.rand() < self.spatial_aug_prob:
# rescale the images
img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
flow = resize_flow(flow,img1.shape[1],img1.shape[0])
if np.random.rand() < 0.5: # h-flip
img1 = img1[:, ::-1]
img2 = img2[:, ::-1]
flow = flow[:, ::-1] * [-1.0, 1.0]
if np.random.rand() < 0.1: # v-flip
img1 = img1[::-1, :]
img2 = img2[::-1, :]
flow = flow[::-1, :] * [1.0, -1.0]
y0 = np.random.randint(-self.margin, img1.shape[0] - self.crop_size[0] + self.margin)
x0 = np.random.randint(-self.margin, img1.shape[1] - self.crop_size[1] + self.margin)
y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0])
x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1])
img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
return img1, img2, flow
def __call__(self, img1, img2, flow):
img1, img2 = self.color_transform(img1, img2)
img1, img2 = self.eraser_transform(img1, img2)
img1, img2, flow = self.spatial_transform(img1, img2, flow)
img1 = np.ascontiguousarray(img1)
img2 = np.ascontiguousarray(img2)
flow = np.ascontiguousarray(flow)
return img1, img2, flow
class FlowAugmentorKITTI:
def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5,logger=None):
self.crop_size = crop_size
self.logger = logger
self.augcolor = torchvision.transforms.ColorJitter(
brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3/3.14)
self.max_scale = max_scale
self.min_scale = min_scale
self.spatial_aug_prob = cfg.SPATIAL_AUG_PROB
self.eraser_aug_prob = 0.5
def color_transform(self, img1, img2):
image_stack = np.concatenate([img1, img2], axis=0)
image_stack = np.array(self.augcolor(Image.fromarray(image_stack)), dtype=np.uint8)
img1, img2 = np.split(image_stack, 2, axis=0)
return img1, img2
def eraser_transform_KITTI(self, img1, img2):
ht, wd = img1.shape[:2]
if np.random.rand() < self.eraser_aug_prob:
mean_color = np.mean(img2.reshape(-1, 3), axis=0)
for _ in range(np.random.randint(1, 3)):
x0 = np.random.randint(0, wd)
y0 = np.random.randint(0, ht)
dx = np.random.randint(50, 100)
dy = np.random.randint(50, 100)
img2[y0:y0+dy, x0:x0+dx, :] = mean_color
return img1, img2
def resize_sparse_flow_map(self, flow, valid, fx=1.0, fy=1.0):
ht, wd = flow.shape[:2]
coords = np.meshgrid(np.arange(wd), np.arange(ht))
coords = np.stack(coords, axis=-1)
coords = coords.reshape(-1, 2).astype(np.float32)
flow = flow.reshape(-1, 2).astype(np.float32)
valid = valid.reshape(-1).astype(np.float32)
coords0 = coords[valid>=1]
flow0 = flow[valid>=1]
ht1 = int(round(ht * fy))
wd1 = int(round(wd * fx))
coords1 = coords0 * [fx, fy]
flow1 = flow0 * [fx, fy]
xx = np.round(coords1[:,0]).astype(np.int32)
yy = np.round(coords1[:,1]).astype(np.int32)
v = (xx > 0) & (xx < wd1) & (yy > 0) & (yy < ht1)
xx = xx[v]
yy = yy[v]
flow1 = flow1[v]
flow_img = np.zeros([ht1, wd1, 2], dtype=np.float32)
valid_img = np.zeros([ht1, wd1], dtype=np.int32)
flow_img[yy, xx] = flow1
valid_img[yy, xx] = 1
return flow_img, valid_img
def spatial_transform(self, img1, img2, flow, valid):
# randomly sample scale
ht, wd = img1.shape[:2]
min_scale = np.maximum(
(self.crop_size[0] + 1) / float(ht),
(self.crop_size[1] + 1) / float(wd))
scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
scale_x = np.clip(scale, min_scale, None)
scale_y = np.clip(scale, min_scale, None)
if np.random.rand() < self.spatial_aug_prob:
# rescale the images
img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
if cfg.SPARSE_RESIZE:
# SPARSE_RESIZE, default to be True
flow, valid = self.resize_sparse_flow_map(flow, valid, fx=scale_x, fy=scale_y)
else:
flow = resize_flow(flow,img1.shape[1],img1.shape[0],method='nearest')
valid = cv2.resize(valid, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_NEAREST)
if np.random.rand() < 0.5: # h-flip
img1 = img1[:, ::-1]
img2 = img2[:, ::-1]
flow = flow[:, ::-1] * [-1.0, 1.0]
valid = valid[:, ::-1]
margin_y = 20
margin_x = 50
y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0] + margin_y)
x0 = np.random.randint(-margin_x, img1.shape[1] - self.crop_size[1] + margin_x)
y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0])
x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1])
img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
valid = valid[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
return img1, img2, flow, valid
def __call__(self, img1, img2, flow, valid):
img1, img2 = self.color_transform(img1, img2)
if not cfg.NO_ERASE:
img1, img2 = self.eraser_transform_KITTI(img1, img2)
img1, img2, flow, valid = self.spatial_transform(img1, img2, flow, valid)
img1 = np.ascontiguousarray(img1)
img2 = np.ascontiguousarray(img2)
flow = np.ascontiguousarray(flow)
valid = np.ascontiguousarray(valid)
return img1, img2, flow, valid
def resize_flow(flow, des_width, des_height, method='bilinear'):
# improper for sparse flow
src_height = flow.shape[0]
src_width = flow.shape[1]
if src_width == des_width and src_height == des_height:
return flow
ratio_height = float(des_height) / float(src_height)
ratio_width = float(des_width) / float(src_width)
if method == 'bilinear':
flow = cv2.resize(
flow, (des_width, des_height), interpolation=cv2.INTER_LINEAR)
elif method == 'nearest':
flow = cv2.resize(
flow, (des_width, des_height), interpolation=cv2.INTER_NEAREST)
else:
raise Exception('Invalid resize flow method!')
flow[:, :, 0] = flow[:, :, 0] * ratio_width
flow[:, :, 1] = flow[:, :, 1] * ratio_height
return flow
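# Minimal usage sketch (added for illustration; not part of the original module).
# resize_flow() rescales both the spatial grid and the flow vectors themselves,
# so a field downscaled by 2x also has its (u, v) components halved. The array
# contents below are made up.
def _example_resize_flow():
    dense_flow = np.ones((100, 200, 2), dtype=np.float32)  # hypothetical dense flow
    small = resize_flow(dense_flow, des_width=100, des_height=50)
    # small.shape == (50, 100, 2) and its values are ~0.5 in both channels
    return small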
|
the-stack_106_15733
|
from telescope_msk.send_graphyte_message import publish_kafka_to_graphite
def publish_metrics(metrics: list, graphite_host: str):
for metric in metrics:
keys = ["high", "low", "lag", "offset"]
for key in keys:
value = metric[key]
# EG: logstash.logs.partition_0.high.$high_watermark
publish_kafka_to_graphite(
create_metric_key(metric, key), value, graphite_host
)
def publish_metric_sums(metrics: list, graphite_host: str):
sums = {}
for metric in metrics:
topic_name = metric["topic_name"]
if topic_name not in sums:
sums[topic_name] = {"sum_high": 0, "sum_low": 0, "sum_lag": 0}
topic_sums = sums[topic_name]
topic_sums["sum_high"] += metric["high"]
topic_sums["sum_low"] += metric["low"]
topic_sums["sum_lag"] += metric["lag"]
for topic_name in sums:
topic_sum = sums[topic_name]
sum_range = topic_sum["sum_high"] - topic_sum["sum_low"]
publish_kafka_to_graphite(
f"{topic_name}.sum-high", topic_sum["sum_high"], graphite_host
)
publish_kafka_to_graphite(
f"{topic_name}.sum-low", topic_sum["sum_low"], graphite_host
)
publish_kafka_to_graphite(
f"{topic_name}.sum-lag", topic_sum["sum_lag"], graphite_host
)
publish_kafka_to_graphite(f"{topic_name}.sum-range", sum_range, graphite_host)
def create_metric_key(metric: list, key: str) -> str:
return f"{metric['topic_name']}.partition_{metric['partition_id']}.{key}"
|
the-stack_106_15735
|
import threading
from binascii import hexlify, unhexlify
from electrum_dash.util import bfh, bh2u
from electrum_dash.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT, NetworkConstants)
from electrum_dash.i18n import _
from electrum_dash.plugins import BasePlugin
from electrum_dash.transaction import deserialize
from electrum_dash.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKeyCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by %s') % self.device)
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.main_thread = threading.current_thread()
# FIXME: move to base class when Ledger is fixed
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
        # disable bridge because it seems to never return if keepkey is plugged
#transport = self._try_bridge(device) or self._try_hid(device)
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated %s firmware for device labelled %s. Please '
'download the updated firmware from %s') %
(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Dash Testnet" if NetworkConstants.TESTNET else "Dash"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your %s.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your %s, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
) % (self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
wizard.loop.exit(0)
def setup_device(self, device_info, wizard):
'''Called when creating a new wallet. Select the device to use. If
        the device is uninitialized, go through the initialization
process.'''
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = bh2u(signed_tx)
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
script_type = self.types.SPENDADDRESS
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.types.SPENDADDRESS
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.types.SPENDMULTISIG
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
outputs = []
has_change = False
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None and not has_change:
has_change = True # no more than one change address
addrtype, hash_160 = b58_address_to_hash160(address)
index, xpubs, m = info
if len(xpubs) == 1:
script_type = self.types.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index)
txoutputtype = self.types.TxOutputType(
amount = amount,
script_type = script_type,
address_n = address_n,
)
else:
script_type = self.types.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d"%index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [ self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys = pubkeys,
signatures = [b''] * len(pubkeys),
m = m)
txoutputtype = self.types.TxOutputType(
multisig = multisig,
amount = amount,
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index),
script_type = script_type)
else:
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
addrtype, hash_160 = b58_address_to_hash160(address)
if addrtype == NetworkConstants.ADDRTYPE_P2PKH:
txoutputtype.script_type = self.types.PAYTOADDRESS
elif addrtype == NetworkConstants.ADDRTYPE_P2SH:
txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
else:
raise BaseException('addrtype: ' + str(addrtype))
txoutputtype.address = address
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the trezor libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
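    # Illustration only (not part of the plugin): sign_message() and show_address()
    # build BIP32-style paths by plain string concatenation before expand_path().
    # For example, with a hypothetical keystore derivation of "m/44'/5'/0'" and
    # sequence (0, 3), address_path becomes "m/44'/5'/0'/0/3", which
    # client.expand_path() converts into the integer address_n list the device expects.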
|
the-stack_106_15737
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 10:53:44 2019
Description:
The GUI app for 56G PON demo. For more information, refer to the
corresponding application note in the `lab604-automation` documentation.
@author: dongxucz
"""
import array
import sys
from os import getcwd
from time import sleep
from PyQt5.QtWidgets import QApplication, QMainWindow, QGraphicsScene, \
QGraphicsPixmapItem, QGraphicsView, QGraphicsItem, \
QPushButton, QLabel, QWidget, QGraphicsOpacityEffect, \
QGraphicsTextItem, QTextBrowser, QLineEdit, QGroupBox, \
QVBoxLayout, QGridLayout, QSlider
from PyQt5.QtGui import ( QBrush, QPen, QPainter, QPixmap, QFont, QColor,
QIcon, QTextDocument)
from PyQt5.QtCore import (Qt, QObject, QPointF, QSize, QRect, QEasingCurve,
QPropertyAnimation, pyqtProperty, pyqtSignal, QEvent, QStateMachine,
QSignalTransition, QState, QTimer)
from vtbackendlib.vt899 import extract_frame, resample_symbols
import numpy as np
import core.vt_device as vt_device
from core.ook_lib import OOK_signal
from core.ks_device import M8195A
from guiunits.connectbutton import ConnectBtn
from guiunits.speedometer import Speedometer
from guiunits.mlpplotwidget import pon56gDemoBerPlot, pon56gDemoMsePlot
from guiunits.pon56gDemoNNTrainingOutput_s import training_console_output
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset
############## for Debugging ################
_SIM = True
############ global variables ###############
VT899Addr = "10.242.13.34", 9998
M8195Addr = "10.242.13.77"
cwd = getcwd()
sample_folder = cwd+'/vtbackendlib/0726vt899pon56g/'
#csvpath = 'D:/PythonScripts/lab604-automation/vtbackendlib/0726vt899pon56g/'
csvpath = sample_folder
frame_len = 196608
ook_preamble = OOK_signal(load_file= csvpath+'Jul 6_1741preamble.csv')
ook_prmlb = ook_preamble.nrz(dtype = 'int8')
globalTrainset = OOK_signal()
if not _SIM: #
print('Loading data ..........')
globalTrainset.append(OOK_signal(load_file=csvpath+'Jul 9_0841train.csv'))
print('25% done ...')
globalTrainset.append(OOK_signal(load_file=csvpath+'Jul 9_0842train.csv'))
print('50% done ...')
globalTrainset.append(OOK_signal(load_file=csvpath+'Jul 9_0843train.csv'))
print('75% done ...')
globalTrainset.append(OOK_signal(load_file=csvpath+'Jul 9_0845train.csv'))
print('OK!\n')
# vt899.trainset = globalTrainset
# vt899.config('prmbl500', ook_prmlb.tobytes())
class vtdev(vt_device.VT_Device):
# algorithm state coding for self.algo_state:
Init = 0 # before setting preamble (cannot extract frame)
NoNN = 1 # can extract frame, but NN not trained
YesNN = 2 # NN trained
TranSit = 3 # A intermediate state: just after NN trained, but speedometer animation not done
def __init__(self, devname, frame_len=0, symbol_rate=0, addr=None, gui=True):
vt_device.VT_Device.__init__(self, devname, gui)
self.set_net_addr(addr)
self.frame_len = frame_len
self.n_prmbl = 500 # n_payload=195608. see work notebook-2 18-07-05
self.n_symbol_test = 10000
self.symbol_rate = symbol_rate
self.algo_state = vtdev.Init
self.set_gui_verbos(1,1,1)
# neural network algorithm related attributes
self.trainset = OOK_signal()
self.trainset_rx = np.array([])
self.neuralnet = self.init_nn(n_tap=15)
self.label_pos = self.n_tap - 4 # n_tap-1 means the label is the last symbol
self.max_epoch = 5
def set_preamble(self, preamble_int):
self.preamble_int = preamble_int
self.preamble_wave = None
self.preamble_len = len(preamble_int)
def prepare_trainloader(self, trainset_rx):
self.trainset_rx = trainset_rx
trainset_ref = self.trainset.nrz()
trainsymbols_x = self.trainset_rx[:-1*self.n_symbol_test]
trainsymbols_y = trainset_ref[:-1*self.n_symbol_test]
testsymbols_x = self.trainset_rx[len(trainsymbols_x):]
testsymbols_y = trainset_ref[len(trainsymbols_x):]
self.trainloader, self.testset = \
self.init_dataloader(self.n_tap, trainsymbols_x, trainsymbols_y,
testsymbols_x, testsymbols_y, self.label_pos, bs=50)
def train_nn(self, trainset_rx):
""" Train the self.neuralnet using trainset_rx.
Argument:
trainset_rx - a numpy array containing all data for both training
and validation. The seperation of training and validation data
is determined by self.n_symbol_test attribute (the last
n_symbol_test samples are for validation).
"""
self.prepare_trainloader(trainset_rx)
criterion = nn.MSELoss() #criterion = nn.CrossEntropyLoss()
criterion = criterion.double()
optimizer = optim.SGD(self.neuralnet.parameters(), lr=0.1, momentum=0.6)
accuracy_histbest = 0
accuracy_waiting_cnt = 0
for epoch in range(self.max_epoch): # loop over the dataset multiple times
running_loss = 0.0
for i, data_batched in enumerate(self.trainloader):
# get the inputs
inputs = data_batched['features']
labels = data_batched['labels'].unsqueeze(1).double()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = self.neuralnet(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
# print statistics
                if (i % 299 == 0): # print statistics every 299 mini-batches
self.msgbf.info('epoch %d-%d, loss: %.3f' %
(epoch + 1, i+1, running_loss / 299))
running_loss = 0.0
correct = 0
total = 0
output_dfnn_buf = torch.Tensor([[0]])
testset_outputs_dfnn = []
with torch.no_grad():
#for i, data in enumerate(testloader):
for i in range(self.testset.n_sample()):
####### extract data from dataset #######
data = self.testset.getitem(i)
inputs = torch.tensor(data['features']).unsqueeze(0)
labels = data['labels']
#inputs = data['features']
inputs[0][-1] = output_dfnn_buf[0][0]
outputs = self.neuralnet(inputs)
testset_outputs_dfnn.append(outputs.item())
predicted = np.round(outputs.item())
#output_dfnn_buf = outputs.clone() # can achieve 0 BER with 0 noise
output_dfnn_buf = torch.Tensor([[predicted]]) # can achieve 0 BER with 0 noise
total += 1
if predicted == labels.item():
correct += 1
else:
self.msgbf.info('{0}{1}{2}{3}{4}{5}{6}{7}{8}{9}'.format(i,
'\n\tinput=',inputs,'\n',
'\tlabel=', labels, '\n',
'\toutput=',outputs.item(), predicted))
pass
self.msgbf.info('Accuracy on %d test data: %.4f %%' % (total, 100*correct/total))
# plt.hist(testset_outputs_dfnn,bins=100)
# plt.show()
if accuracy_waiting_cnt>1:
if accuracy_histbest <= correct/total:
break
if accuracy_histbest < correct/total:
accuracy_histbest = correct/total
accuracy_waiting_cnt = 0
else:
accuracy_waiting_cnt+=1
self.algo_state = vtdev.YesNN
def run_inference(self, testset_nrz, rx_symbol):
""" Run inference with the trained neural network.
Arguments:
testset_nrz - list of ±1 as the test labels
rx_symbol - the received signal via ADC.
"""
testsymbols_x = rx_symbol
testsymbols_y = testset_nrz
(test_x, test_y) = self.lineup(testsymbols_x, testsymbols_y,
n_tap=self.n_tap, label_pos=self.label_pos,
for_test=True)
testset = nn_ndarray_dataset(test_x, test_y)
correct = 0
total = 0
output_dfnn_buf = torch.Tensor([[0]])
testset_outputs_dfnn = []
with torch.no_grad():
for i in range(testset.n_sample()):
####### extract data from dataset #######
data = testset.getitem(i)
inputs = torch.tensor(data['features']).unsqueeze(0)
labels = data['labels']
#inputs = data['features']
inputs[0][-1] = output_dfnn_buf[0][0]
outputs = self.neuralnet(inputs)
testset_outputs_dfnn.append(outputs.item())
predicted = np.round(outputs.item())
#output_dfnn_buf = outputs.clone() # 0 BER with 0 noise
output_dfnn_buf = torch.Tensor([[predicted]])
total += 1
if predicted == labels.item():
correct += 1
else:
                    self.msgbf.info('{0}{1}{2}{3}{4}{5}{6}{7}{8}{9}{10}'.format(i,
                        '\n\tinput=', inputs, '\n',
                        '\tlabel=', labels, '\n',
                        '\toutput=', outputs.item(), '-->', predicted))
pass
self.msgbf.info('BER on %d test data: %.4f %%' % (total, 100*(1-correct/total)))
def hd_decision(self, testset_bits, rx_symbol):
""" recover payload bits using hard decision to comapre BER. """
rx_hard = OOK_signal(data_ref=rx_symbol) #
n_error = 0
for i in range(rx_hard.data_len):
if (rx_hard.data_bits[i]!=testset_bits[i]):
n_error+=1
#print(i,rx_symbol[i],'->',trainset_nrz[i])
BER_hard = n_error/rx_hard.data_len
self.msgbf.info('BER=%.3f, accurate=%.1f %%' % (BER_hard,100*(1-BER_hard)))
def calcEexpectedGbps(self, ber, maxGbps=60):
""" calculate an 'achievable bit rate' based on a BER value"""
if ber>0.4: # abnormal
expectedGbps = 0
elif ber>0.009: # NoNN
expectedGbps = 12.5 # 14-ber*10
else: # YesNN
expectedGbps = -152.073*ber + 51.018
return expectedGbps
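    # Worked example for calcEexpectedGbps() above (illustration only): a
    # post-training BER of 0.001 gives -152.073 * 0.001 + 51.018 ~ 50.87 Gbps;
    # a BER above 0.4 is treated as abnormal (0 Gbps), and anything between
    # 0.009 and 0.4 falls back to the 12.5 Gbps hard-decision (NoNN) floor.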
def save_trained_nn(self, nn_save_path):
torch.save(self.neuralnet, nn_save_path)
def init_dataloader(self, n_tap, train_x, train_y, test_x, test_y, label_pos, bs):
(_x, _y) = self.lineup(train_x, train_y, n_tap=n_tap,
label_pos=label_pos, framelen=196608)
(_x_t, _y_t) = self.lineup(test_x, test_y, n_tap=n_tap,
label_pos=label_pos, for_test=True)
trainset = nn_ndarray_dataset(_x, _y)
trainloader = DataLoader(trainset, batch_size=bs,
shuffle=True,drop_last=True) # num_workers=1
testset = nn_ndarray_dataset(_x_t, _y_t)
# testloader = DataLoader(testset)
# return (trainloader, testloader)
return (trainloader, testset)
def init_nn(self, n_tap):
self.n_tap = n_tap
D_in = n_tap+1
H_1 = n_tap+1
H_2 = 2
D_out = 1
df_nn = nn.Sequential(
nn.Linear(D_in, H_1, bias=False),
nn.Tanh(),
nn.Linear(H_1, H_2, bias=False),
nn.Tanh(),
nn.Linear(H_2, D_out),
)
df_nn = df_nn.double()
return df_nn
def lineup(self, x, y, n_tap, label_pos, for_test=False, framelen=None):
""" lineup feature and duo-binary labels for the decision-feedback NN"""
if framelen==None:
framelen = len(x)
if label_pos>n_tap-1:
raise ValueError('invalid label_pos')
else:
features = []
labels = []
n_frames = int(len(x)/framelen)
for frame_i in range(n_frames):
for i in range(framelen-n_tap+1):
temp_x = x[frame_i*framelen+i:frame_i*framelen+i+n_tap]
if for_test:
features.append(np.append(temp_x, 10))
else:
features.append(np.append(temp_x, (y[frame_i*framelen+i+label_pos-1]+y[frame_i*framelen+i+label_pos-2])/2))
labels.append((y[frame_i*framelen+i+label_pos]+y[frame_i*framelen+i+label_pos-1])/2)
for i in range(n_frames*framelen, len(x)-n_tap):
temp_x = x[i:i+n_tap]
if for_test:
features.append(np.append(temp_x, 10))
else:
features.append(np.append(temp_x, (y[i+label_pos-1]+y[i+label_pos-2])/2))
labels.append((y[i+label_pos]+y[i+label_pos-1])/2)
return(np.array(features),np.array(labels))
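    # Worked example for lineup() above (illustration only, smaller than the real
    # defaults): with n_tap=3, label_pos=2 and NRZ labels y = [-1, +1, +1, -1, ...],
    # each feature row is the n_tap received samples x[i:i+3] plus one extra slot,
    # and the label is the duo-binary average (y[i+2] + y[i+1]) / 2, i.e. one of
    # {-1, 0, +1}. During training the extra slot holds the previous duo-binary
    # label (teacher forcing); for test data it is filled with the dummy value 10
    # and later overwritten by the previous network decision in run_inference().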
class awg(M8195A):
def __init__(self, addr):
M8195A.__init__("TCPIP0::{}::inst0::INSTR".format(addr))
self.set_ext_clk()
self.set_ref_out() # set ref_out frequency by configuring two dividers
# default parameters: div1=2, div2=5
self.set_fs_GHz(nGHz=56)
self.set_amp(0.6) # set output amplitude (range between 0 and 1)
self.set_ofst(0) # set output offset
class nn_ndarray_dataset(Dataset):
"""the customized data reader for Pytorch's DataLoader utility.
    Inherited from the abstract class 'Dataset' with overridden __len__()
and __getitem__() methods. Takes ndarray as inputs.
"""
def __init__(self, dataframe_x, dataframe_y, transform=None):
"""
Args:
            dataframe_x/y: numpy ndarrays of features and labels.
"""
self.dataframe_x = dataframe_x
self.dataframe_y = dataframe_y
self.transform = transform
def __len__(self):
return self.dataframe_x.shape[0]
def __getitem__(self, idx):
sample = {'features':self.dataframe_x[idx],
'labels':self.dataframe_y[idx]}
return sample
getitem = __getitem__
n_sample = __len__
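# Minimal usage sketch (illustration only; the array contents are made up): the
# dataset simply indexes two aligned ndarrays, so it can be wrapped directly in a
# PyTorch DataLoader for mini-batch training, as init_dataloader() does above.
def _example_nn_dataset():
    feats = np.random.randn(100, 16)   # e.g. 100 samples with n_tap+1 features
    labels = np.random.randn(100)
    loader = DataLoader(nn_ndarray_dataset(feats, labels), batch_size=10, shuffle=True)
    batch = next(iter(loader))
    return batch['features'].shape, batch['labels'].shape  # (10, 16) and (10,)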
def read_sample_bin_file(filepath, dtype = 'b'): # default formating 'b'=int8
with open(filepath,'rb') as fo:
filedata = array.array(dtype,fo.read())
return np.array(filedata.tolist())
def normalize_rxsymbols(rx_raw):
rx_shift = (np.array(rx_raw)-np.mean(rx_raw))
return rx_shift/np.max(np.abs(rx_shift))
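# Illustration only: normalize_rxsymbols() removes the DC offset and scales the
# captured ADC samples to the [-1, 1] range, e.g. [0, 2, 4] -> [-1.0, 0.0, 1.0].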
class itemClickedSgnlWrapper(QObject):
sgnl = pyqtSignal()
class Fan(QObject):
""" Define a class to show a picture of a fan, for animation.
To define a pyqtProperty for animation, the base class should be a QObject
or any other inherited classes, like QWidget.
Then, add a QGraphicsPixmapItem to host the picture.
"""
def __init__(self, parent=None):
super().__init__(parent)
self.pixmap_item = QGraphicsPixmapItem(QPixmap(cwd+'/guiunits/imags/pon56gdemo/fan.png'))
self.pixmap_item.setTransformOriginPoint(self.pixmap_item.boundingRect().center())
#self.clickedSgnlWrapper = itemClickedSgnlWrapper()
#self.clicked = self.clickedSgnlWrapper.sgnl
#self.pixmap_item.mousePressEvent = self.clickEventHandler
def clickEventHandler(self, event):
print('emitting signal')
self.clicked.emit()
def _set_rotation_dgr(self, dgr):
self.pixmap_item.setRotation(dgr)
def fanAnimation(self):
anim = QPropertyAnimation(self, b'rotation')
anim.setDuration(1000)
anim.setStartValue(0)
anim.setEndValue(360)
anim.setLoopCount(-1)
return anim
# define a property named as 'rotation', and designate a setter function.
rotation = pyqtProperty(float, fset=_set_rotation_dgr)
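# Illustration only: QPropertyAnimation can only animate properties declared on a
# QObject, and QGraphicsPixmapItem is not one, which is why Fan wraps the item
# instead of subclassing it. The same pattern as fanAnimation() above:
#
#     fan = Fan()
#     anim = QPropertyAnimation(fan, b'rotation')  # drives the Fan.rotation property
#     anim.setStartValue(0); anim.setEndValue(360); anim.start()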
class fadingPic(QObject):
""" Wrap a QGraphicsPixmapItem and impliment the fade in/out animation"""
def __init__(self, pixmap, parent=None):
super().__init__(parent)
self.pixmap_item = QGraphicsPixmapItem(pixmap)
self.text_item = QGraphicsTextItem("Nerual Network based Channel Learning")
font = QFont("Nokia Pure Text Light", 18) # QFont.Bold
self.text_item.setFont(font)
self.text_item.setDefaultTextColor(QColor(18, 65, 145))
def _set_opacity(self, opc):
self.pixmap_item.setOpacity(opc)
self.text_item.setOpacity(opc)
def fadeIn(self):
anim = QPropertyAnimation(self, b'opacity')
anim.setDuration(800)
anim.setStartValue(0)
anim.setEndValue(1)
#anim.setLoopCount(1)
return anim
def fadeOut(self):
anim = QPropertyAnimation(self, b'opacity')
anim.setDuration(800)
anim.setStartValue(1)
anim.setEndValue(0)
#anim.setLoopCount(1)
return anim
opacity = pyqtProperty(float, fset=_set_opacity)
class AppWindow(QMainWindow):
def __init__(self, datadevice, awg):
super().__init__()
self.datadevice = datadevice
self.awg = awg
self.nokia_blue = QColor(18, 65, 145)
self.title = "High-speed PON demo"
self.geo = {
'top' : 30,
'left' : 0,
'width' : 1920,
'height': 1080 }
self.setStyleSheet("background-color: white;")
self._detailFigure_2ClickedSigWrapper = itemClickedSgnlWrapper()
self.detailFigure_2Clicked = self._detailFigure_2ClickedSigWrapper.sgnl
self.setFocusPolicy(Qt.StrongFocus)
self.initWindow()
self._lang = 'en' # language. Changed on pressing "L" key
def initWindow(self):
self.setWindowTitle(self.title)
self.setGeometry(self.geo['left'], self.geo['top'],
self.geo['width'], self.geo['height'])
self.titleBar = self.inittitlebar()
self.bkgrndGroup = self.initbkgrndgroup()
self.detailGroup = self.initdetailgroup()
self.prototypeGroup = self.initprototypeGroup()
self.initGeometries()
self.initConnections()
# self.fanAnim.start()
self.show()
def initGeometries(self):
self.titleBar.setGeometry(147 ,59, 1625, 69)
self.bkgrndGroup.setGeometry(20 ,178, 570, 826)
self.detailGroup.setGeometry(610 ,178, 1285, 420)
self.prototypeGroup.setGeometry(610, 613, 1285, 391)
def inittitlebar(self):
wdgt = QWidget(parent=self)
mainTitle = QLabel(parent=wdgt)
mainTitle.setText("Ultra-Fast Fiber Access with Intelligent PHY") #
font = QFont("Nokia Pure Text Light", 35, QFont.Bold)
mainTitle.setFont(font)
# mainTitle.setFrameStyle(22) # show border
mainTitle.setAlignment(Qt.AlignHCenter | Qt.AlignCenter) # Qt.AlignRight
palette = self.palette()
palette.setColor(self.foregroundRole(), self.nokia_blue)
mainTitle.setPalette(palette)
mainTitle.setGeometry(50,0,950, 69)
subTitle = QLabel(parent=wdgt)
subTitle.setText("—— Enabling 50Gbps PON")
font = QFont("Nokia Pure Text Light", 20)
subTitle.setFont(font)
# subTitle.setFrameStyle(22) # show border
subTitle.setAlignment(Qt.AlignLeft | Qt.AlignBottom)
palette = self.palette()
palette.setColor(self.foregroundRole(), self.nokia_blue)
subTitle.setPalette(palette)
subTitle.setGeometry(1010,16,600, 40)
self.mainTitle = mainTitle
self.subTitle = subTitle
return wdgt
def initbkgrndgroup(self):
wdgt = QWidget(parent=self)
title = QLabel(parent=wdgt)
title.setText("Growing Demand for Access")
font = QFont("Nokia Pure Text Light", 25, QFont.Bold)
title.setFont(font)
# title.setFrameStyle(22) # show border
title.setAlignment(Qt.AlignLeft | Qt.AlignCenter) # Qt.AlignHCenter
palette = self.palette()
palette.setColor(self.foregroundRole(), self.nokia_blue)
title.setPalette(palette)
title.setGeometry(20, 10, 490, 69)
bkgrndYear = QLabel(parent=wdgt)
bkgrndYear.setPixmap(QPixmap(cwd+'/guiunits/imags/pon56gdemo/bkgrndyear.png'))
bkgrndYear.move(25,110)
bkgrndSlider = QPushButton(parent=wdgt)
bkgrndSlider.setFixedSize(40,60)
bkgrndSlider.setStyleSheet("QPushButton { background : transparent }")
bkgrndSlider.setIcon(QIcon(cwd+'/guiunits/imags/pon56gdemo/bkgrndslider.png'))
bkgrndSlider.setIconSize(QSize(50,63))
bkgrndSlider.setFlat(True)
bkgrndSlider.move(38,640)
sliderAnim_1 = QPropertyAnimation(bkgrndSlider, b"geometry")
sliderAnim_1.setStartValue(QRect(38, 640, 40, 60))
sliderAnim_1.setEndValue(QRect(38, 400, 40, 60))
sliderAnim_1.setDuration(1000)
sliderAnim_1.setEasingCurve(QEasingCurve.OutQuad)
sliderAnim_2 = QPropertyAnimation(bkgrndSlider, b"geometry")
sliderAnim_2.setStartValue(QRect(38, 400, 40, 60))
sliderAnim_2.setEndValue(QRect(38, 160, 40, 60))
sliderAnim_2.setDuration(1000)
sliderAnim_2.setEasingCurve(QEasingCurve.OutQuad)
bkgrnd2015 = QLabel(parent=wdgt)
bkgrnd2015.setPixmap(QPixmap(cwd+'/guiunits/imags/pon56gdemo/bkgrnd2015.png'))
bkgrnd2015.move(280, 600)
# anim2015 = QPropertyAnimation(bkgrnd2015, b"windowOpacity")
bkgrnd2020 = QLabel(parent=wdgt)
bkgrnd2020.setPixmap(QPixmap(cwd+'/guiunits/imags/pon56gdemo/bkgrnd2020.png'))
bkgrnd2020.move(270, 340)
mask2020 = QGraphicsOpacityEffect(parent=bkgrnd2020)
bkgrnd2020.setGraphicsEffect(mask2020)
mask2020.setOpacity(0)
bkgrnd2020FadeIn = QPropertyAnimation(mask2020, b"opacity")
bkgrnd2020FadeIn.setDuration(1000)
bkgrnd2020FadeIn.setStartValue(0)
bkgrnd2020FadeIn.setEndValue(1)
bkgrnd2020FadeIn.setEasingCurve(QEasingCurve.InQuad)
bkgrnd2025 = QLabel(parent=wdgt)
bkgrnd2025.setPixmap(QPixmap(cwd+'/guiunits/imags/pon56gdemo/bkgrnd2025.png'))
bkgrnd2025.move(275, 110)
mask2025 = QGraphicsOpacityEffect(parent=bkgrnd2025)
bkgrnd2025.setGraphicsEffect(mask2025)
mask2025.setOpacity(0)
bkgrnd2025FadeIn = QPropertyAnimation(mask2025, b"opacity")
bkgrnd2025FadeIn.setDuration(1000)
bkgrnd2025FadeIn.setStartValue(0)
bkgrnd2025FadeIn.setEndValue(1)
bkgrnd2025FadeIn.setEasingCurve(QEasingCurve.InQuad)
wdgt.setStyleSheet("background-color: rgb(242, 242, 242);")
self.bkgrndTitle = title
self.bkgrndSlider = bkgrndSlider
self.sliderAnim_1 = sliderAnim_1
self.sliderAnim_2 = sliderAnim_2
self.mask2020 = mask2020
self.mask2025 = mask2025
self.bkgrnd2020FadeIn = bkgrnd2020FadeIn
self.bkgrnd2025FadeIn = bkgrnd2025FadeIn
self._bkgrndSliderStatus = 0 # 0 - @2015; 1 - @2020; 2 - @2025
return wdgt
def initdetailgroup(self):
view = QGraphicsView(parent=self)
brush=QBrush(QColor(242, 242, 242))
view.setBackgroundBrush(brush)
view.setFrameStyle(16) # QFrame.Plain
def clickEventHandler(event):
self.detailFigure_2Clicked.emit()
detailFigure_1 = QGraphicsPixmapItem(QPixmap(cwd+'/guiunits/imags/pon56gdemo/detailfigure_1.png'))
detailFigure_2_Qobj = fadingPic(QPixmap(cwd+'/guiunits/imags/pon56gdemo/detailfigure_2.png'))
detailFigure_2 = detailFigure_2_Qobj.pixmap_item
detailFigure_2_title = detailFigure_2_Qobj.text_item
detailFigure_1.mousePressEvent = clickEventHandler
title = QGraphicsTextItem("Our Innovation/Contribution")
font = QFont("Nokia Pure Text Light", 25, QFont.Bold)
title.setFont(font)
title.setDefaultTextColor(self.nokia_blue)
textItem1 = QGraphicsTextItem()
textItem1.setHtml('''<body style="font-family:Nokia Pure Text Light;color:#124191;font-size:23px;">
<div >10GHz</div>
<div > Optics </div>
</body>''')
textItem1.setTextWidth(80)
textItem2 = QGraphicsTextItem()
textItem2.setHtml('''<body style="font-family:Nokia Pure Text Light;color:#124191;font-size:23px;">
<div > 10GHz</div>
<div > Optics </div>
</body>''')
textItem2.setTextWidth(100)
fan = Fan() # a QObject which wraps a QGraphicsItem inside
scene = QGraphicsScene()
scene.setSceneRect(0, 0, 1285, 420)
scene.addItem(detailFigure_2)
scene.addItem(detailFigure_1)
scene.addItem(detailFigure_2_title)
scene.addItem(textItem1)
scene.addItem(textItem2)
scene.addItem(title)
scene.addItem(fan.pixmap_item)
detailFigure_1.setPos(QPointF(35, 88))
detailFigure_2.setPos(QPointF(570, 96))
detailFigure_2.setOpacity(0) # hided at first
detailFigure_2_title.setPos(QPointF(750, 46))
detailFigure_2_title.setOpacity(0)
title.setPos(QPointF(50,20))
textItem1.setPos(QPointF(40, 168))
textItem2.setPos(QPointF(361, 168))
fan.pixmap_item.setPos(QPointF(456.5, 138))
self.fanAnim = fan.fanAnimation()
view.setScene(scene)
view.setSceneRect(0, 0, 1285, 420)
view.setAlignment(Qt.AlignLeft | Qt.AlignTop)
view.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
view.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
view.setRenderHint(QPainter.Antialiasing)
self.detailGrpTextItem1 = textItem1
self.detailGrpTextItem2 = textItem2
self.detailFigTitle = title
self.detailFigure_2_title = detailFigure_2_title
self.turbofan = fan
self.NNfigure_fadeIn = detailFigure_2_Qobj.fadeIn()
self.NNfigure_fadeOut = detailFigure_2_Qobj.fadeOut()
self._detailFigure_2_state = 0 # 0-hided, 1-showed
return view
def initprototypeGroup(self):
wdgt = QWidget(parent=self)
title = QLabel(parent=wdgt)
title.setText("Prototype Monitor")
font = QFont("Nokia Pure Text Light", 25, QFont.Bold)
title.setFont(font)
# title.setFrameStyle(22) # show border
title.setAlignment(Qt.AlignLeft | Qt.AlignCenter)
palette = self.palette()
palette.setColor(self.foregroundRole(), self.nokia_blue)
title.setPalette(palette)
title.setGeometry(50, 10, 300, 69)
meter = Speedometer(title='Data Rate', unit = 'Gbps',
min_value=0, max_value=55, parent=wdgt)
meter.setGeometry(40, 80, 320, 270)
initMeterAnim = QPropertyAnimation(meter, b"value")
initMeterAnim.setStartValue(0)
initMeterAnim.setEndValue(12)
initMeterAnim.setDuration(500)
boostMeterAnim = QPropertyAnimation(meter, b"value")
boostMeterAnim.setStartValue(12)
boostMeterAnim.setEndValue(50)
boostMeterAnim.setDuration(3000)
boostMeterAnim.setEasingCurve(QEasingCurve.InQuint)
berPlot = pon56gDemoBerPlot(parent=wdgt, width=3.5, height=2, tight_layout=True,
dpi=100, datadevice=self.datadevice)
berPlot.setGeometry(405, 25, 420, 170)
msePlot = pon56gDemoMsePlot(parent=wdgt, width=3.5, height=2, tight_layout=True,
dpi=100)
msePlot.setGeometry(405, 195, 420, 170)
self.updateTimer = QTimer()
self.updateTimer.setInterval(1100)
ConsoleGroupBox = QGroupBox("Device Control Panel", parent=wdgt)
ConsoleGroupBox.setGeometry(870, 22, 370, 340)
Console = QTextBrowser()
AddrEdit = QLineEdit()
# AddrEdit.setText('10.242.13.34')
AddrEdit.setText('192.168.1.199')
ConnectButton = ConnectBtn(AddrEdit)
ResetNNButton = QPushButton("Reset")
TrainButton = QPushButton("Train NN")
QuitButton = QPushButton("Quit")
layout = QVBoxLayout()
sublayout = QGridLayout()
sublayout_widget = QWidget()
sublayout.addWidget(AddrEdit, 1, 0, 1, 2)
sublayout.addWidget(ConnectButton, 1, 2)
sublayout.addWidget(ResetNNButton, 2, 0)
sublayout.addWidget(TrainButton, 2, 1)
sublayout.addWidget(QuitButton, 2, 2)
sublayout_widget.setLayout(sublayout)
layout.addWidget(Console)
layout.addWidget(sublayout_widget)
ConsoleGroupBox.setLayout(layout)
AddrEdit.setStyleSheet("background-color: rgb(255, 255, 255);")
Console.setStyleSheet("background-color: rgb(255, 255, 255);")
wdgt.setStyleSheet("background-color: rgb(242, 242, 242);")
self.prototypeTitle = title
self.AddrEdit = AddrEdit
self.Console = Console
self.meter = meter
self.initMeterAnim = initMeterAnim
self.boostMeterAnim = boostMeterAnim
self.ConnectButton = ConnectButton
self.TrainButton = TrainButton
self.QuitButton = QuitButton
self.ResetNNButton = ResetNNButton
self.berPlot = berPlot
self.msePlot = msePlot
return wdgt
def bkgrndGroupSM(self):
""" State machine of animations """
if self._bkgrndSliderStatus==0: # move from 2015 to 2020
self.sliderAnim_1.start()
self.bkgrnd2020FadeIn.start()
self._bkgrndSliderStatus = 1
elif self._bkgrndSliderStatus==1: # move from 2020 to 2025
self.sliderAnim_2.start()
self.bkgrnd2025FadeIn.start()
self._bkgrndSliderStatus = 2
elif self._bkgrndSliderStatus==2: # move back to 2015
self.bkgrndSlider.move(38,640)
self.mask2020.setOpacity(0)
self.mask2025.setOpacity(0)
self._bkgrndSliderStatus = 0
def detailFigSM(self):
""" State machine of animations """
if self._detailFigure_2_state == 0:
self.NNfigure_fadeIn.start()
self._detailFigure_2_state = 1
self.fanAnim.start()
else:
self.NNfigure_fadeOut.start()
self._detailFigure_2_state = 0
self.fanAnim.stop()
def initConnections(self):
self.bkgrndSlider.clicked.connect(self.bkgrndGroupSM)
self.detailFigure_2Clicked.connect(self.detailFigSM)
self.ConnectButton.clicked.connect(self.openVTdevice)
self.AddrEdit.returnPressed.connect(self.openVTdevice)
self.QuitButton.clicked.connect(self.closeEvent)
self.datadevice.guisgnl.connect(self.Console.append)
self.updateTimer.timeout.connect(self.berPlot.update_figure)
self.berPlot.plot2Console.connect(self.Console.append)
self.berPlot.plot2Meter.connect(self.meter.setSpeed)
# self.TrainButton.clicked.connect(self.trainNN) # train NN
self.TrainButton.clicked.connect(self.trainNN_temp) # train NN, just GUI effect
self.ResetNNButton.clicked.connect(self.resetPlot)
def openVTdevice(self):
ipaddr = self.AddrEdit.text()
print((ipaddr, 9998))
self.datadevice.set_net_addr((ipaddr,9998))
        self.Console.append('connecting to ' + ipaddr)
if 'Connected' in self.datadevice.open_device().split():
self.datadevice.query('hello')
self.Console.append('Set preamble ...')
self.setPreamble()
def setPreamble(self):
self.datadevice.set_preamble(ook_prmlb) # local int preamble record
#self.datadevice.config('prmbl500', ook_prmlb.tobytes()) # write the same to remote backend
self.datadevice.trainset = globalTrainset
sleep(2) # the backend need several seconds to do resample & correlation
#ref_bin = self.datadevice.query_bin('getRef 2000')
#vt899.preamble_wave = np.array(memoryview(ref_bin).cast('f').tolist())
self.Console.append('Preamble synchronization Done! ')
self.datadevice.algo_state = self.datadevice.NoNN
self.initMeterAnim.finished.connect(self.updateTimer.start)
self.initMeterAnim.start()
def trainNN(self):
# self.fanAnim.start() # start fan animation to indicate neural network
if ((self.datadevice.open_state == 0) or (self.datadevice.algo_state==self.datadevice.Init)):
            self.Console.append('Device not opened, or preamble not set. Cannot proceed.')
else:
frame_len = self.datadevice.frame_len
trainset_rx = []
if not _SIM:
for i in range(4):
print(slice(i*frame_len, (i+1)*frame_len))
data_for_train = globalTrainset.take(slice(i*frame_len, (i+1)*frame_len))
self.awg.send_binary_port1(250*(data_for_train-0.5))
sleep(2) # make sure the ADC has captured the new waveform
frame_bin = self.datadevice.query_bin('getFrame 786432')
frame = list(memoryview(frame_bin).cast('f').tolist())
trainset_rx.extend(frame)
else:
for i in range(4):
path_str = sample_folder+'chan1-{0}.bin'.format(i)
samples_all = normalize_rxsymbols(read_sample_bin_file(path_str))
(samples_frame, cl) = extract_frame(samples_all, 196608, ook_preamble.nrz())
trainset_rx.extend(resample_symbols(samples_frame, self.datadevice.preamble_wave))
self.datadevice.train_nn(np.array(trainset_rx))
def trainNN_temp(self):
# not really running NN, in order to test pure GUI functions
self.updateTimer.stop()
self.msePlot.reset()
if ((self.datadevice.open_state == 0) or (self.datadevice.algo_state==self.datadevice.Init)):
            self.Console.append('Device not opened, or preamble not set. Cannot proceed.')
else:
texts = training_console_output[:]
tempTimer = QTimer()
tempTimer.setInterval(30)
def printTrainingOutput():
if texts:
text_item = texts.pop(0)
self.Console.append(text_item)
if text_item[-5:]=='demo)':
mse = float(text_item[-20:-15])
self.msePlot.update_figure(mse)
else:
tempTimer.stop()
self.datadevice.algo_state = self.datadevice.TranSit
self.updateTimer.start()
self.boostMeterAnim.finished.connect(self.changeAlgoState)
self.boostMeterAnim.start()
#self.updateTimer.start()
tempTimer.timeout.connect(printTrainingOutput)
tempTimer.start()
def resetPlot(self):
# clear the MSE plot, and turn the BER & speedometer state back to 12.5G
self.updateTimer.stop()
self.datadevice.algo_state = self.datadevice.NoNN
self.msePlot.reset()
self.updateTimer.start()
def changeAlgoState(self):
self.datadevice.algo_state = self.datadevice.YesNN
def cleanUpAndQuit(self):
# close the VT_Device to inform the backend ending the TCP session.
self.datadevice.close_device()
self.close()
def keyPressEvent(self, KEvent):
k = KEvent.key()
print(k,' pressed')
if k==Qt.Key_Q:
self.bkgrndSlider.click()
elif k==Qt.Key_W:
self.detailFigure_2Clicked.emit()
elif k==Qt.Key_T:
self.TrainButton.click()
elif k==Qt.Key_R:
self.ResetNNButton.click()
elif k==Qt.Key_L:
self.switchLang()
else:
pass
def switchLang(self):
if (self._lang == 'en'):
print("Switching language form EN to CN.")
self._lang = 'cn'
# self.mainTitle.setText('''<div style="font-family:微软雅黑;margin-left:250px;">基于 <i>人工智能</i> 的‘超高速光接入’ </div>''')
self.mainTitle.setText('''<div style="font-family:微软雅黑;margin-left:250px;"> <i>超高智能、超低成本</i>的下一代无源光网络 </div>''')
self.mainTitle.setGeometry(50,0,1150, 69)
self.subTitle.setText('''<div style="font-family:微软雅黑;"> ——50Gbps光接入 </div> ''')
self.subTitle.setHidden(True)
# setGeometry(1010,16,600, 40)
self.bkgrndTitle.setText("光接入的 ‘方案 vs 需求’")
self.detailFigTitle.setPlainText("颠覆式创新")
self.detailFigure_2_title.setPlainText("基于 深度递归神经网络 的光信道‘学习’")
self.prototypeTitle.setText("硬件平台实时监控")
self.detailGrpTextItem1.setHtml('''
<body style="font-family:Nokia Pure Text Light;color:#124191;font-size:23px;">
<div >10GHz</div>
<div > <b>光器件</b> </div>
</body>''')
self.detailGrpTextItem2.setHtml('''
<body style="font-family:Nokia Pure Text Light;color:#124191;font-size:23px;">
<div > 10GHz</div>
<div > <b>光器件</b> </div>
</body>''')
else:
print("Switching language form CN to EN.")
self._lang = 'en'
self.mainTitle.setText("Ultra-Fast Fiber Access with Intelligent PHY")
self.mainTitle.setGeometry(50,0,950, 69)
self.subTitle.setText('''<div style="font-family:微软雅黑;"> —— Enabling 50Gbps PON </div> ''')
#self.subTitle.setText("—— Enabling 50Gbps PON")
self.subTitle.setHidden(False)
self.bkgrndTitle.setText("Growing Demand for Access")
self.prototypeTitle.setText("Prototype Monitor")
self.detailFigTitle.setPlainText("Our Innovation/Contribution")
self.detailFigure_2_title.setPlainText("Nerual Network based Channel Learning")
self.detailGrpTextItem1.setHtml('''
<body style="font-family:Nokia Pure Text Light;color:#124191;font-size:23px;">
<div >10GHz</div>
<div > Optics </div>
</body>''')
self.detailGrpTextItem2.setHtml('''
<body style="font-family:Nokia Pure Text Light;color:#124191;font-size:23px;">
<div > 10GHz</div>
<div > Optics </div>
</body>''')
def closeEvent(self, ce):
self.cleanUpAndQuit()
if __name__ == '__main__':
if not _SIM:
print('Initializing AWG ...')
m8195a = awg(M8195Addr)
# send a frame containing preamble
data_for_prmbl_sync = globalTrainset.take(slice(frame_len))
        m8195a.send_binary_port1(250*(data_for_prmbl_sync-0.5))
sleep(1) # make sure the ADC has captured the new waveform
else:
m8195a = None
vt899 = vtdev("vt899pondemo", frame_len=frame_len, symbol_rate=56, gui=True)
pon56Gdemo = QApplication(sys.argv)
window = AppWindow(datadevice=vt899, awg=m8195a)
sys.exit(pon56Gdemo.exec())
print("close device")
vt899.close_device()
ook_preamble = OOK_signal(load_file= csvpath+'Jul 6_1741preamble.csv')
ook_prmlb = ook_preamble.nrz(dtype = 'int8')
#
## corr_result = np.array(memoryview(vt899.query_bin('getCorr 1570404')).cast('f').tolist())
## plt.plot(corr_result)
#
# if not _SIM:
# ookrand = OOK_signal()
# ookrand.append(ook_preamble)
# ookrand.append(np.random.randint(2,size=frame_len-2*ook_preamble.data_len))
# ookrand.append(ook_preamble)
# awg.send_binary_port1(126*ookrand.nrz(), rescale = False)
# rx_bin = vt899.query_bin('getFrame 786432')
# rx_frame = list(memoryview(rx_bin).cast('f').tolist())
# else:
# ook_rand = OOK_signal(data_ref=globalTrainset.take(slice(0*frame_len, 1*frame_len)))
# rx_all = normalize_rxsymbols(read_sample_bin_file(csvpath+'chan1-0.bin'))
# (rx_orign, cl) = extract_frame(rx_all, 196608, ook_preamble.nrz())
# rx_frame = resample_symbols(rx_orign, vt899.preamble_wave)
#
# vt899.hd_decision(ook_rand.nrz(), rx_frame)
# vt899.run_inference(ook_rand.nrz(), rx_frame)
#
# vt899.close_device()
#
#
|
the-stack_106_15738
|
# -*- coding: utf-8 -*-
import os
import time
from datetime import date, datetime, timedelta
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.management import call_command
import mock
from nose.tools import eq_
import amo
import amo.tests
import mkt
from mkt.api.models import Nonce
from mkt.developers.models import ActivityLog
from mkt.files.models import File
from mkt.site.fixtures import fixture
from mkt.users.models import UserProfile
from mkt.versions.models import Version
from mkt.webapps import cron
from mkt.webapps.cron import (clean_old_signed, mkt_gc, update_app_trending,
update_downloads)
from mkt.webapps.models import Addon, Webapp
from mkt.webapps.tasks import _get_trending
class TestLastUpdated(amo.tests.TestCase):
fixtures = fixture('webapp_337141')
def test_catchall(self):
"""Make sure the catch-all last_updated is stable and accurate."""
# Nullify all datestatuschanged so the public add-ons hit the
# catch-all.
(File.objects.filter(status=amo.STATUS_PUBLIC)
.update(datestatuschanged=None))
Addon.objects.update(last_updated=None)
cron.addon_last_updated()
for addon in Addon.objects.filter(status=amo.STATUS_PUBLIC):
eq_(addon.last_updated, addon.created)
# Make sure it's stable.
cron.addon_last_updated()
for addon in Addon.objects.filter(status=amo.STATUS_PUBLIC):
eq_(addon.last_updated, addon.created)
class TestHideDisabledFiles(amo.tests.TestCase):
fixtures = fixture('webapp_337141')
msg = 'Moving disabled file: %s => %s'
def setUp(self):
self.addon = Webapp.objects.get(pk=337141)
self.version = self.addon.latest_version
self.f1 = self.version.all_files[0]
@mock.patch('mkt.files.models.os')
def test_leave_nondisabled_files(self, os_mock):
stati = [(amo.STATUS_PUBLIC, amo.STATUS_PUBLIC)]
for addon_status, file_status in stati:
self.addon.update(status=addon_status)
File.objects.update(status=file_status)
cron.hide_disabled_files()
assert not os_mock.path.exists.called, (addon_status, file_status)
@mock.patch('mkt.files.models.File.mv')
@mock.patch('mkt.files.models.storage')
def test_move_user_disabled_addon(self, m_storage, mv_mock):
# Use Addon.objects.update so the signal handler isn't called.
Addon.objects.filter(id=self.addon.id).update(
status=amo.STATUS_PUBLIC, disabled_by_user=True)
File.objects.update(status=amo.STATUS_PUBLIC)
cron.hide_disabled_files()
# Check that f1 was moved.
mv_mock.assert_called_with(self.f1.file_path,
self.f1.guarded_file_path, self.msg)
# There's only 1 file.
eq_(mv_mock.call_count, 1)
@mock.patch('mkt.files.models.File.mv')
@mock.patch('mkt.files.models.storage')
def test_move_admin_disabled_addon(self, m_storage, mv_mock):
Addon.objects.filter(id=self.addon.id).update(
status=amo.STATUS_DISABLED)
File.objects.update(status=amo.STATUS_PUBLIC)
cron.hide_disabled_files()
# Check that f1 was moved.
mv_mock.assert_called_with(self.f1.file_path,
self.f1.guarded_file_path, self.msg)
# There's only 1 file.
eq_(mv_mock.call_count, 1)
@mock.patch('mkt.files.models.File.mv')
@mock.patch('mkt.files.models.storage')
def test_move_disabled_file(self, m_storage, mv_mock):
Addon.objects.filter(id=self.addon.id).update(
status=amo.STATUS_REJECTED)
File.objects.filter(id=self.f1.id).update(status=amo.STATUS_DISABLED)
cron.hide_disabled_files()
# f1 should have been moved.
mv_mock.assert_called_with(self.f1.file_path,
self.f1.guarded_file_path, self.msg)
eq_(mv_mock.call_count, 1)
@mock.patch('mkt.files.models.File.mv')
@mock.patch('mkt.files.models.storage')
def test_ignore_deleted_versions(self, m_storage, mv_mock):
# Apps only have 1 file and version delete only deletes one.
self.version.delete()
mv_mock.reset_mock()
# Create a new version/file just like the one we deleted.
version = Version.objects.create(addon=self.addon)
File.objects.create(version=version, filename='f2')
cron.hide_disabled_files()
# Mock shouldn't have been called.
assert not mv_mock.called, mv_mock.call_args
class TestWeeklyDownloads(amo.tests.TestCase):
def setUp(self):
self.app = Webapp.objects.create(type=amo.ADDON_WEBAPP,
status=amo.STATUS_PUBLIC)
def get_app(self):
return Webapp.objects.get(pk=self.app.pk)
@mock.patch('mkt.webapps.tasks.get_monolith_client')
def test_weekly_downloads(self, _mock):
client = mock.Mock()
raw = {
'facets': {
'installs': {
'_type': 'date_histogram',
'entries': [
{'count': 3,
'time': 1390780800000,
'total': 19.0},
{'count': 62,
'time': 1391385600000,
'total': 236.0}
]
}
}
}
client.raw.return_value = raw
_mock.return_value = client
eq_(self.app.weekly_downloads, 0)
update_downloads([self.app.pk])
self.app.reload()
eq_(self.app.weekly_downloads, 255)
@mock.patch('mkt.webapps.tasks.get_monolith_client')
def test_total_downloads(self, _mock):
client = mock.Mock()
raw = {
'facets': {
'installs': {
u'_type': u'statistical',
u'count': 49,
u'total': 6638.0
}
}
}
client.raw.return_value = raw
_mock.return_value = client
eq_(self.app.total_downloads, 0)
update_downloads([self.app.pk])
self.app.reload()
eq_(self.app.total_downloads, 6638)
@mock.patch('mkt.webapps.tasks.get_monolith_client')
def test_monolith_error(self, _mock):
client = mock.Mock()
client.side_effect = ValueError
client.raw.side_effect = Exception
_mock.return_value = client
update_downloads([self.app.pk])
self.app.reload()
eq_(self.app.weekly_downloads, 0)
eq_(self.app.total_downloads, 0)
class TestCleanup(amo.tests.TestCase):
def setUp(self):
self.file = os.path.join(settings.SIGNED_APPS_REVIEWER_PATH,
'1', 'x.z')
def test_not_cleaned(self):
storage.open(self.file, 'w')
clean_old_signed()
assert storage.exists(self.file)
def test_cleaned(self):
storage.open(self.file, 'w')
clean_old_signed(-60)
assert not storage.exists(self.file)
@mock.patch('lib.crypto.packaged.sign_app')
class TestSignApps(amo.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
self.app = Addon.objects.get(id=337141)
self.app.update(is_packaged=True)
self.app2 = amo.tests.app_factory(
name=u'Mozillaball ょ', app_slug='test',
is_packaged=True, version_kw={'version': '1.0',
'created': None})
self.app3 = amo.tests.app_factory(
name='Test app 3', app_slug='test3', status=amo.STATUS_REJECTED,
is_packaged=True, version_kw={'version': '1.0',
'created': None})
def test_by_webapp(self, sign_mock):
v1 = self.app.get_version()
call_command('sign_apps', webapps=str(v1.pk))
file1 = v1.all_files[0]
assert sign_mock.called_with(((file1.file_path,
file1.signed_file_path),))
def test_all(self, sign_mock):
v1 = self.app.get_version()
v2 = self.app2.get_version()
call_command('sign_apps')
file1 = v1.all_files[0]
file2 = v2.all_files[0]
eq_(len(sign_mock.mock_calls), 2)
eq_(sign_mock.mock_calls[0][1][:2],
(file1.file_path, file1.signed_file_path))
eq_(sign_mock.mock_calls[1][1][:2],
(file2.file_path, file2.signed_file_path))
class TestUpdateTrending(amo.tests.TestCase):
def setUp(self):
self.app = Webapp.objects.create(type=amo.ADDON_WEBAPP,
status=amo.STATUS_PUBLIC)
@mock.patch('mkt.webapps.tasks._get_trending')
def test_trending_saved(self, _mock):
_mock.return_value = 12.0
update_app_trending()
eq_(self.app.get_trending(), 12.0)
for region in mkt.regions.REGIONS_DICT.values():
eq_(self.app.get_trending(region=region), 12.0)
# Test running again updates the values as we'd expect.
_mock.return_value = 2.0
update_app_trending()
eq_(self.app.get_trending(), 2.0)
for region in mkt.regions.REGIONS_DICT.values():
eq_(self.app.get_trending(region=region), 2.0)
@mock.patch('mkt.webapps.tasks.get_monolith_client')
def test_get_trending(self, _mock):
client = mock.Mock()
client.return_value = [
{'count': 133.0, 'date': date(2013, 8, 26)},
{'count': 122.0, 'date': date(2013, 9, 2)},
]
_mock.return_value = client
# 1st week count: 133 + 122 = 255
# Prior 3 weeks get averaged: (133 + 122) / 3 = 85
# (255 - 85) / 85 = 2.0
eq_(_get_trending(self.app.id), 2.0)
@mock.patch('mkt.webapps.tasks.get_monolith_client')
def test_get_trending_threshold(self, _mock):
client = mock.Mock()
client.return_value = [
{'count': 49.0, 'date': date(2013, 8, 26)},
{'count': 50.0, 'date': date(2013, 9, 2)},
]
_mock.return_value = client
# 1st week count: 49 + 50 = 99
# 99 is less than 100 so we return 0.0.
eq_(_get_trending(self.app.id), 0.0)
@mock.patch('mkt.webapps.tasks.get_monolith_client')
def test_get_trending_monolith_error(self, _mock):
client = mock.Mock()
client.side_effect = ValueError
_mock.return_value = client
eq_(_get_trending(self.app.id), 0.0)
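# Worked sketch of the trending formula exercised above (illustration only): with
# a most-recent-week count of 255 installs and a prior-three-week average of
# (133 + 122) / 3 = 85, the score is (255 - 85) / 85 = 2.0; a recent-week count
# below the 100-install threshold is reported as 0.0.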
@mock.patch('os.stat')
@mock.patch('os.listdir')
@mock.patch('os.remove')
class TestGarbage(amo.tests.TestCase):
def setUp(self):
self.user = UserProfile.objects.create(
email='[email protected]', name='gc_test')
amo.log(amo.LOG.CUSTOM_TEXT, 'testing', user=self.user,
created=datetime(2001, 1, 1))
def test_garbage_collection(self, rm_mock, ls_mock, stat_mock):
eq_(ActivityLog.objects.all().count(), 1)
mkt_gc()
eq_(ActivityLog.objects.all().count(), 0)
def test_nonce(self, rm_mock, ls_mock, stat_mock):
nonce = Nonce.objects.create(nonce='a', timestamp=1, client_key='b')
nonce.update(created=self.days_ago(2))
eq_(Nonce.objects.count(), 1)
mkt_gc()
eq_(Nonce.objects.count(), 0)
def test_dump_delete(self, rm_mock, ls_mock, stat_mock):
ls_mock.return_value = ['lol']
stat_mock.return_value = StatMock(days_ago=1000)
mkt_gc()
assert rm_mock.call_args_list[0][0][0].endswith('lol')
def test_new_no_delete(self, rm_mock, ls_mock, stat_mock):
ls_mock.return_value = ['lol']
stat_mock.return_value = StatMock(days_ago=1)
mkt_gc()
assert not rm_mock.called
class StatMock(object):
def __init__(self, days_ago):
self.st_mtime = time.mktime(
(datetime.now() - timedelta(days_ago)).timetuple())
self.st_size = 100
|
the-stack_106_15740
|
from textwrap import wrap
from sqlalchemy.orm import joinedload
from clld.web.datatables.value import Values
from clld.web.datatables.parameter import Parameters
from clld.web.datatables.contributor import Contributors, NameCol, UrlCol
from clld.web.datatables.source import Sources, TypeCol
from clld.web.datatables.language import Languages
from clld.web.datatables.base import LinkCol, Col, DataTable, LinkToMapCol, DetailsRowLinkCol
from clld.web.util.htmllib import HTML
from clld.web.util.helpers import icon, link, external_link
from clld.db.meta import DBSession
from clld.db.models import common
from clld.db.util import get_distinct_values
from dogonlanguages import models
from dogonlanguages import util
class DogonLanguages(Languages):
def base_query(self, query):
return Languages.base_query(self, query).filter(models.Languoid.in_project == True)
class ProjectMembers(Contributors):
def base_query(self, query):
return query.filter(models.Member.in_project == True)
def col_defs(self):
return [
NameCol(self, 'name'),
Col(self, 'description'),
UrlCol(self, 'Homepage'),
]
class VideosCol(Col):
__kw__ = {'input_size': 'mini'}
def format(self, item):
item = self.get_obj(item)
if item.count_videos:
return HTML.span(
'%s' % item.count_videos, icon('film', inverted=True), class_='badge')
return ''
class ImagesCol(Col):
__kw__ = {'input_size': 'mini'}
def format(self, item):
item = self.get_obj(item)
if item.count_images:
return HTML.span(
'%s' % item.count_images, icon('camera', inverted=True), class_='badge')
return ''
class TsammalexCol(Col):
def format(self, item):
return util.tsammalex_link(self.dt.req, item)
class ConcepticonCol(Col):
def format(self, item):
return util.concepticon_link(self.dt.req, item)
class Concepts(Parameters):
def base_query(self, query):
return query.join(models.Subdomain).join(models.Domain)
def col_defs(self):
return [
Col(self, 'ID', model_col=common.Parameter.id),
LinkCol(self, 'gloss', model_col=common.Parameter.name),
Col(self, 'domain',
choices=get_distinct_values(models.Domain.name),
get_object=lambda i: i.subdomain.domain,
model_col=models.Domain.name),
Col(self, 'subdomain',
choices=get_distinct_values(models.Subdomain.name),
get_object=lambda i: i.subdomain,
model_col=models.Subdomain.name),
ConcepticonCol(
self, 'concepticon',
input_size='mini',
model_col=models.Concept.concepticon_id),
TsammalexCol(
self, 'tsammalex',
input_size='mini',
model_col=models.Concept.tsammalex_taxon),
ImagesCol(self, 'images', model_col=models.Concept.count_images),
VideosCol(self, 'videos', model_col=models.Concept.count_videos),
]
class Words(Values):
def __init__(self, req, model, **kw):
Values.__init__(self, req, model, **kw)
self.ff = False
if kw.get('ff') or 'ff' in req.params:
self.ff = True
def xhr_query(self):
res = Values.xhr_query(self)
if self.ff:
res['ff'] = 1
return res
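    # The `ff` flag restricts the table to the flora/fauna domains (see the
    # Domain.name filter in base_query) and is echoed back through xhr_query so
    # that server-side paging requests keep the same restriction.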
def base_query(self, query):
query = query.join(common.ValueSet)
if self.language:
query = query.join(common.ValueSet.parameter)\
.join(models.Subdomain)\
.join(models.Domain)
return query.filter(common.ValueSet.language_pk == self.language.pk)
if self.parameter:
query = query.join(common.ValueSet.language)
return query.filter(common.ValueSet.parameter_pk == self.parameter.pk)
query = query\
.join(common.Parameter)\
.join(common.Language)\
.join(models.Subdomain)\
.join(models.Domain)\
.options(
joinedload(common.Value.valueset)
.joinedload(common.ValueSet.parameter)
.joinedload(models.Concept.subdomain)
.joinedload(models.Subdomain.domain),
joinedload(common.Value.valueset, common.ValueSet.language)
)
if self.ff:
query = query.filter(models.Domain.name.in_(['flora', 'fauna']))
return query
def col_defs(self):
if self.language:
return [
LinkCol(
self, 'concept',
get_object=lambda item: item.valueset.parameter,
model_col=common.Parameter.name),
Col(self, 'domain',
get_object=lambda item: item.valueset.parameter.subdomain.domain,
model_col=models.Domain.name),
Col(self, 'subdomain',
get_object=lambda item: item.valueset.parameter.subdomain,
model_col=models.Subdomain.name),
Col(self, 'word', model_col=common.Value.name),
]
if self.parameter:
return [
LinkCol(
self, 'language',
get_object=lambda item: item.valueset.language,
model_col=common.Language.name),
Col(self, 'word', model_col=common.Value.name),
]
res = [
LinkCol(
self, 'language',
get_object=lambda item: item.valueset.language,
model_col=common.Language.name),
LinkCol(
self, 'concept',
get_object=lambda item: item.valueset.parameter,
model_col=common.Parameter.name),
ImagesCol(
self, '#',
get_object=lambda i: i.valueset.parameter,
model_col=models.Concept.count_images),
Col(self, 'domain',
get_object=lambda item: item.valueset.parameter.subdomain.domain,
model_col=models.Domain.name),
Col(self, 'subdomain',
get_object=lambda item: item.valueset.parameter.subdomain,
model_col=models.Subdomain.name),
Col(self, 'word', model_col=common.Value.name),
Col(self, 'literal meaning', model_col=common.Value.description),
Col(self, 'note', model_col=models.Counterpart.comment),
]
return res
class LanguoidCol(Col):
def __init__(self, dt, name, **kw):
sq = DBSession.query(models.Village.languoid_pk).distinct().subquery()
kw['choices'] = [
(l.id, l.name) for l in
DBSession.query(common.Language).filter(common.Language.pk.in_(sq))]
Col.__init__(self, dt, name, **kw)
def search(self, qs):
return common.Language.id == qs
def order(self):
return common.Language.name
def format(self, item):
if item.languoid:
if item.languoid.in_project:
return link(self.dt.req, item.languoid)
return item.languoid.name
class ImageCol(Col):
__kw__ = dict(bSearchable=False, bSortable=False)
def format(self, item):
if item._files:
return HTML.span('%s' % len(item._files), icon('camera', inverted=True), class_='badge')
return ''
class Villages(DataTable):
def base_query(self, query):
return query\
.outerjoin(models.Village.languoid)\
.outerjoin(models.Village._files)\
.options(
joinedload(models.Village.languoid),
joinedload(models.Village._files),
)
def col_defs(self):
return [
LinkCol(self, 'name'),
LinkToMapCol(self, '_'),
ImageCol(self, '#'),
LanguoidCol(self, 'language (group)'),
Col(self, 'major city', model_col=models.Village.major_city),
Col(self, 'surnames', model_col=models.Village.surnames),
]
class DocumentCol(Col):
__kw__ = dict(bSearchable=False, bSortable=False)
def format(self, item):
if item._files:
return HTML.span('%s' % len(item._files), icon('file', inverted=True), class_='badge')
return ''
class Documents(Sources):
def col_defs(self):
return [
DetailsRowLinkCol(self, 'd'),
LinkCol(self, 'name'),
Col(self, 'description', sTitle='Title'),
Col(self, 'year', input_size='mini'),
Col(self, 'author'),
Col(self, 'DLP', sTitle='DLP', model_col=models.Document.project_doc),
Col(self, 'type',
input_size='mini',
model_col=models.Document.doctype,
choices=get_distinct_values(models.Document.doctype)),
DocumentCol(self, '#'),
TypeCol(self, 'bibtex_type'),
]
class ViewCol(Col):
__kw__ = dict(bSearchable=False, bSortable=False)
def format(self, item):
return HTML.a(
icon('eye-open'),
href=util.cdstar_url(item),
title='view',
class_="btn")
def wrap_fname(i):
return '_ ... _'.join(
s.replace(' ', '_') for s in wrap(i.name.replace('_', ' '), width=60))
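# wrap_fname re-breaks long underscore-separated file names so DataTable cells
# can wrap them: underscores become spaces for textwrap.wrap(width=60), each
# resulting chunk gets its underscores back, and chunks are joined with
# '_ ... _'. Space-free names shorter than 60 characters come back unchanged.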
class Files(DataTable):
def col_defs(self):
return [
DetailsRowLinkCol(self, '#', button_text='view'),
Col(self, 'name', format=wrap_fname),
ViewCol(self, 'view'),
Col(self, 'size', format=lambda i: util.format_size(i)),
Col(self, 'mime_type', sTitle='media type', choices=get_distinct_values(models.File.mime_type)),
Col(self, 'date', model_col=models.File.date_created, bSearchable=False),
# TODO: link to download!
Col(self, 'id', sTitle='MD5 checksum'),
]
class PlaceCol(Col):
def format(self, item):
if item.village:
return link(self.dt.req, item.village, label=item.place)
return item.place
class FileCol(Col):
__kw__ = {'bSearchable': False, 'bSortable': False}
def __init__(self, dt, name, **kw):
self.subtype = kw.pop('subtype', name)
kw['sDescription'] = {
'mp4': external_link('https://en.wikipedia.org/wiki/MPEG-4'),
'quicktime': external_link('https://en.wikipedia.org/wiki/QuickTime'),
'x-msvideo': external_link('https://en.wikipedia.org/wiki/Audio_Video_Interleave'),
}[self.subtype]
kw['sTitle'] = name.upper()
Col.__init__(self, dt, name, **kw)
def format(self, item):
f = item.get_file(self.subtype)
if f:
return HTML.a(' ' + util.format_file(f, with_mime_type=False), href=util.cdstar_url(f))
return ''
class Movies(DataTable):
def col_defs(self):
return [
DetailsRowLinkCol(self, '#', button_text='watch'),
Col(self, 'name'),
Col(self, 'description', sTitle='Category', choices=get_distinct_values(models.Movie.description)),
Col(self, 'duration', format=lambda i: util.format_duration(i)),
PlaceCol(self, 'place'),
#
# TODO: avi/qt/mp4
#
FileCol(self, 'mp4'),
FileCol(self, 'quicktime'),
FileCol(self, 'avi', subtype='x-msvideo'),
]
def includeme(config):
config.register_datatable('contributors', ProjectMembers)
config.register_datatable('languages', DogonLanguages)
config.register_datatable('parameters', Concepts)
config.register_datatable('values', Words)
config.register_datatable('villages', Villages)
config.register_datatable('files', Files)
config.register_datatable('movies', Movies)
config.register_datatable('sources', Documents)
|
the-stack_106_15743
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from page_sets.login_helpers import google_login
class TopPages(page_module.Page):
def __init__(self,
url,
page_set,
shared_page_state_class,
name='',
extra_browser_args=None):
if name == '':
name = url
super(TopPages, self).__init__(
url=url,
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
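# Each subclass below pins the URL of one "top site" and, where needed,
# overrides RunNavigateSteps to log in via google_login and/or wait for a
# page-specific element so measurement starts from a settled page.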
class GoogleWebSearchPage(TopPages):
""" Why: top google property; a google tab is often open """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='',
extra_browser_args=None):
super(GoogleWebSearchPage, self).__init__(
url='https://www.google.com/#hl=en&q=barack+obama',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
super(GoogleWebSearchPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(text='Next')
class GoogleImageSearchPage(TopPages):
""" Why: tough image case; top google properties """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='',
extra_browser_args=None):
super(GoogleImageSearchPage, self).__init__(
'https://www.google.com/search?q=cats&tbm=isch',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
google_login.LoginGoogleAccount(action_runner, 'googletest')
super(GoogleImageSearchPage, self).RunNavigateSteps(action_runner)
class GmailPage(TopPages):
""" Why: productivity, top google properties """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='',
extra_browser_args=None):
super(GmailPage, self).__init__(
url='https://mail.google.com/mail/',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
google_login.LoginGoogleAccount(action_runner, 'googletest')
super(GmailPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForJavaScriptCondition(
'window.gmonkey !== undefined &&'
'document.getElementById("gb") !== null')
class GoogleCalendarPage(TopPages):
""" Why: productivity, top google properties """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='',
extra_browser_args=None):
super(GoogleCalendarPage, self).__init__(
url='https://www.google.com/calendar/',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
google_login.LoginGoogleAccount(action_runner, 'googletest')
super(GoogleCalendarPage, self).RunNavigateSteps(action_runner)
action_runner.Wait(2)
action_runner.WaitForElement('div[class~="navForward"]')
action_runner.ExecuteJavaScript("""
(function() {
var elem = document.createElement('meta');
elem.name='viewport';
elem.content='initial-scale=1';
document.body.appendChild(elem);
})();""")
action_runner.Wait(1)
class GoogleDocPage(TopPages):
""" Why: productivity, top google properties; Sample doc in the link """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='Docs_(1_open_document_tab)',
extra_browser_args=None):
super(GoogleDocPage, self).__init__(
# pylint: disable=line-too-long
url=
'https://docs.google.com/document/d/1X-IKNjtEnx-WW5JIKRLsyhz5sbsat3mfTpAPUSX3_s4/view',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
google_login.LoginGoogleAccount(action_runner, 'googletest')
super(GoogleDocPage, self).RunNavigateSteps(action_runner)
action_runner.Wait(2)
action_runner.WaitForJavaScriptCondition(
'document.getElementsByClassName("kix-appview-editor").length')
class GooglePlusPage(TopPages):
""" Why: social; top google property; Public profile; infinite scrolls """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='',
extra_browser_args=None):
super(GooglePlusPage, self).__init__(
url='https://plus.google.com/110031535020051778989/posts',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
google_login.LoginGoogleAccount(action_runner, 'googletest')
super(GooglePlusPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(text='Home')
class YoutubePage(TopPages):
""" Why: #3 (Alexa global) """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='',
extra_browser_args=None):
super(YoutubePage, self).__init__(
url='http://www.youtube.com',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
google_login.LoginGoogleAccount(action_runner, 'googletest')
super(YoutubePage, self).RunNavigateSteps(action_runner)
action_runner.Wait(2)
class BlogspotPage(TopPages):
""" Why: #11 (Alexa global), google property; some blogger layouts have
infinite scroll but more interesting """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='Blogger',
extra_browser_args=None):
super(BlogspotPage, self).__init__(
url='http://googlewebmastercentral.blogspot.com/',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
super(BlogspotPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(text='accessibility')
class WordpressPage(TopPages):
""" Why: #18 (Alexa global), Picked an interesting post """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='Wordpress',
extra_browser_args=None):
super(WordpressPage, self).__init__(
# pylint: disable=line-too-long
url=
'http://en.blog.wordpress.com/2012/09/04/freshly-pressed-editors-picks-for-august-2012/',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
super(WordpressPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(
# pylint: disable=line-too-long
'a[href="http://en.blog.wordpress.com/2012/08/30/new-themes-able-and-sight/"]'
)
class FacebookPage(TopPages):
""" Why: top social,Public profile """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='Facebook',
extra_browser_args=None):
super(FacebookPage, self).__init__(
url='https://www.facebook.com/barackobama',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
super(FacebookPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(text='Videos')
class LinkedinPage(TopPages):
""" Why: #12 (Alexa global), Public profile. """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='LinkedIn',
extra_browser_args=None):
super(LinkedinPage, self).__init__(
url='http://www.linkedin.com/in/linustorvalds',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
class WikipediaPage(TopPages):
""" Why: #6 (Alexa) most visited worldwide,Picked an interesting page. """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='Wikipedia_(1_tab)',
extra_browser_args=None):
super(WikipediaPage, self).__init__(
url='http://en.wikipedia.org/wiki/Wikipedia',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
class TwitterPage(TopPages):
""" Why: #8 (Alexa global),Picked an interesting page """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='Twitter',
extra_browser_args=None):
super(TwitterPage, self).__init__(
url='https://twitter.com/katyperry',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
super(TwitterPage, self).RunNavigateSteps(action_runner)
action_runner.Wait(2)
class PinterestPage(TopPages):
""" Why: #37 (Alexa global) """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='Pinterest',
extra_browser_args=None):
super(PinterestPage, self).__init__(
url='http://pinterest.com',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
class ESPNPage(TopPages):
""" Why: #1 sports """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='ESPN',
extra_browser_args=None):
super(ESPNPage, self).__init__(
url='http://espn.go.com',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
class WeatherPage(TopPages):
""" Why: #7 (Alexa news); #27 total time spent, picked interesting page. """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='Weather.com',
extra_browser_args=None):
super(WeatherPage, self).__init__(
url='http://www.weather.com/weather/right-now/Mountain+View+CA+94043',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
class YahooGamesPage(TopPages):
""" Why: #1 games according to Alexa (with actual games in it) """
def __init__(self,
page_set,
shared_page_state_class=shared_page_state.SharedPageState,
name='',
extra_browser_args=None):
super(YahooGamesPage, self).__init__(
url='http://games.yahoo.com',
page_set=page_set,
shared_page_state_class=shared_page_state_class,
name=name,
extra_browser_args=extra_browser_args)
def RunNavigateSteps(self, action_runner):
super(YahooGamesPage, self).RunNavigateSteps(action_runner)
action_runner.Wait(2)
|
the-stack_106_15745
|
from model.news.News import News
import pandas as pd
from run import db
import traceback
class NewsDao(object):
    def get_all_news(self):
        pd.set_option('max_colwidth', 20000)
        news_list = []  # avoid shadowing the built-in `list`
        df = pd.read_csv('data/news.csv').values
        # only the first 30 rows of the CSV are exposed
        for i in range(30):
            news = News(df[i][1], df[i][2], df[i][3])
            news_list.append(news)
        return news_list
    def get_one_news(self, id):
        session = db.session()
        try:
            # indexing raises IndexError (caught below) when no row matches
            return session.query(News).filter(News.id == id)[0]
        except Exception:
            print("error")
            traceback.print_exc()
        finally:
            # close the session exactly once, on both the success and error paths
            session.close()
|
the-stack_106_15747
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import pretend
import pytest
from webob.multidict import MultiDict
from pyramid.httpexceptions import HTTPNotFound, HTTPBadRequest
from warehouse import views
from warehouse.views import (
classifiers,
current_user_indicator,
forbidden,
health,
httpexception_view,
index,
robotstxt,
opensearchxml,
search,
force_status,
flash_messages,
forbidden_include,
)
from ..common.db.accounts import UserFactory
from ..common.db.classifiers import ClassifierFactory
from ..common.db.packaging import ProjectFactory, ReleaseFactory, FileFactory
class TestHTTPExceptionView:
def test_returns_context_when_no_template(self, pyramid_config):
pyramid_config.testing_add_renderer("non-existent.html")
response = context = pretend.stub(status_code=499)
request = pretend.stub()
assert httpexception_view(context, request) is response
@pytest.mark.parametrize("status_code", [403, 404, 410, 500])
def test_renders_template(self, pyramid_config, status_code):
renderer = pyramid_config.testing_add_renderer("{}.html".format(status_code))
context = pretend.stub(
status="{} My Cool Status".format(status_code),
status_code=status_code,
headers={},
)
request = pretend.stub()
response = httpexception_view(context, request)
assert response.status_code == status_code
assert response.status == "{} My Cool Status".format(status_code)
renderer.assert_()
@pytest.mark.parametrize("status_code", [403, 404, 410, 500])
def test_renders_template_with_headers(self, pyramid_config, status_code):
renderer = pyramid_config.testing_add_renderer("{}.html".format(status_code))
context = pretend.stub(
status="{} My Cool Status".format(status_code),
status_code=status_code,
headers={"Foo": "Bar"},
)
request = pretend.stub()
response = httpexception_view(context, request)
assert response.status_code == status_code
assert response.status == "{} My Cool Status".format(status_code)
assert response.headers["Foo"] == "Bar"
renderer.assert_()
def test_renders_404_with_csp(self, pyramid_config):
renderer = pyramid_config.testing_add_renderer("404.html")
csp = {}
services = {"csp": pretend.stub(merge=csp.update)}
context = HTTPNotFound()
request = pretend.stub(find_service=lambda name: services[name], path="")
response = httpexception_view(context, request)
assert response.status_code == 404
assert response.status == "404 Not Found"
assert csp == {
"frame-src": ["https://www.youtube-nocookie.com"],
"script-src": ["https://www.youtube.com", "https://s.ytimg.com"],
}
renderer.assert_()
def test_simple_404(self):
csp = {}
services = {"csp": pretend.stub(merge=csp.update)}
context = HTTPNotFound()
for path in ("/simple/not_found_package", "/simple/some/unusual/path/"):
request = pretend.stub(find_service=lambda name: services[name], path=path)
response = httpexception_view(context, request)
assert response.status_code == 404
assert response.status == "404 Not Found"
assert response.content_type == "text/plain"
assert response.text == "404 Not Found"
class TestForbiddenView:
def test_logged_in_returns_exception(self, pyramid_config):
renderer = pyramid_config.testing_add_renderer("403.html")
exc = pretend.stub(status_code=403, status="403 Forbidden", headers={})
request = pretend.stub(authenticated_userid=1)
resp = forbidden(exc, request)
assert resp.status_code == 403
renderer.assert_()
def test_logged_out_redirects_login(self):
exc = pretend.stub()
request = pretend.stub(
authenticated_userid=None,
path_qs="/foo/bar/?b=s",
route_url=pretend.call_recorder(
lambda route, _query: "/accounts/login/?next=/foo/bar/%3Fb%3Ds"
),
)
resp = forbidden(exc, request)
assert resp.status_code == 303
assert resp.headers["Location"] == "/accounts/login/?next=/foo/bar/%3Fb%3Ds"
class TestForbiddenIncludeView:
def test_forbidden_include(self):
exc = pretend.stub()
request = pretend.stub()
resp = forbidden_include(exc, request)
assert resp.status_code == 403
assert resp.content_type == "text/html"
assert resp.content_length == 0
def test_robotstxt(pyramid_request):
assert robotstxt(pyramid_request) == {}
assert pyramid_request.response.content_type == "text/plain"
def test_opensearchxml(pyramid_request):
assert opensearchxml(pyramid_request) == {}
assert pyramid_request.response.content_type == "text/xml"
class TestIndex:
def test_index(self, db_request):
project = ProjectFactory.create()
release1 = ReleaseFactory.create(project=project)
release1.created = datetime.date(2011, 1, 1)
release2 = ReleaseFactory.create(project=project)
release2.created = datetime.date(2012, 1, 1)
FileFactory.create(
release=release1,
filename="{}-{}.tar.gz".format(project.name, release1.version),
python_version="source",
)
UserFactory.create()
assert index(db_request) == {
# assert that ordering is correct
"latest_releases": [release2, release1],
"trending_projects": [release2],
"num_projects": 1,
"num_users": 3,
"num_releases": 2,
"num_files": 1,
}
def test_esi_current_user_indicator():
assert current_user_indicator(pretend.stub()) == {}
def test_esi_flash_messages():
assert flash_messages(pretend.stub()) == {}
class TestSearch:
@pytest.mark.parametrize("page", [None, 1, 5])
def test_with_a_query(self, monkeypatch, db_request, page):
params = MultiDict({"q": "foo bar"})
if page is not None:
params["page"] = page
db_request.params = params
sort = pretend.stub()
suggest = pretend.stub(sort=pretend.call_recorder(lambda *a, **kw: sort))
es_query = pretend.stub(suggest=pretend.call_recorder(lambda *a, **kw: suggest))
db_request.es = pretend.stub(
query=pretend.call_recorder(lambda *a, **kw: es_query)
)
page_obj = pretend.stub(page_count=(page or 1) + 10, item_count=1000)
page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
monkeypatch.setattr(views, "ElasticsearchPage", page_cls)
url_maker = pretend.stub()
url_maker_factory = pretend.call_recorder(lambda request: url_maker)
monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
assert search(db_request) == {
"page": page_obj,
"term": params.get("q", ""),
"order": params.get("o", ""),
"applied_filters": [],
"available_filters": [],
}
assert page_cls.calls == [
pretend.call(suggest, url_maker=url_maker, page=page or 1)
]
assert url_maker_factory.calls == [pretend.call(db_request)]
assert db_request.es.query.calls == [
pretend.call(views.gather_es_queries(params["q"]))
]
assert es_query.suggest.calls == [
pretend.call("name_suggestion", params["q"], term={"field": "name"})
]
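    # The same stubbing pattern (es_query -> suggest -> sort, plus a fake
    # ElasticsearchPage and paginate_url_factory) is reused by the remaining
    # TestSearch tests below.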
@pytest.mark.parametrize("page", [None, 1, 5])
def test_with_exact_phrase_query(self, monkeypatch, db_request, page):
params = MultiDict({"q": '"foo bar"'})
if page is not None:
params["page"] = page
db_request.params = params
sort = pretend.stub()
suggest = pretend.stub(sort=pretend.call_recorder(lambda *a, **kw: sort))
es_query = pretend.stub(suggest=pretend.call_recorder(lambda *a, **kw: suggest))
db_request.es = pretend.stub(
query=pretend.call_recorder(lambda *a, **kw: es_query)
)
page_obj = pretend.stub(
page_count=(page or 1) + 10, item_count=(page or 1) + 10
)
page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
monkeypatch.setattr(views, "ElasticsearchPage", page_cls)
url_maker = pretend.stub()
url_maker_factory = pretend.call_recorder(lambda request: url_maker)
monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
assert search(db_request) == {
"page": page_obj,
"term": params.get("q", ""),
"order": params.get("o", ""),
"applied_filters": [],
"available_filters": [],
}
assert page_cls.calls == [
pretend.call(suggest, url_maker=url_maker, page=page or 1)
]
assert url_maker_factory.calls == [pretend.call(db_request)]
assert db_request.es.query.calls == [
pretend.call(views.gather_es_queries(params["q"]))
]
assert es_query.suggest.calls == [
pretend.call("name_suggestion", params["q"], term={"field": "name"})
]
@pytest.mark.parametrize("page", [None, 1, 5])
def test_with_a_single_char_query(self, monkeypatch, db_request, page):
params = MultiDict({"q": "a"})
if page is not None:
params["page"] = page
db_request.params = params
sort = pretend.stub()
suggest = pretend.stub(sort=pretend.call_recorder(lambda *a, **kw: sort))
es_query = pretend.stub(suggest=pretend.call_recorder(lambda *a, **kw: suggest))
db_request.es = pretend.stub(
query=pretend.call_recorder(lambda *a, **kw: es_query)
)
page_obj = pretend.stub(page_count=(page or 1) + 10, item_count=1000)
page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
monkeypatch.setattr(views, "ElasticsearchPage", page_cls)
url_maker = pretend.stub()
url_maker_factory = pretend.call_recorder(lambda request: url_maker)
monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
assert search(db_request) == {
"page": page_obj,
"term": params.get("q", ""),
"order": params.get("o", ""),
"applied_filters": [],
"available_filters": [],
}
assert page_cls.calls == [
pretend.call(suggest, url_maker=url_maker, page=page or 1)
]
assert url_maker_factory.calls == [pretend.call(db_request)]
assert db_request.es.query.calls == [
pretend.call(views.gather_es_queries(params["q"]))
]
assert es_query.suggest.calls == [
pretend.call("name_suggestion", params["q"], term={"field": "name"})
]
assert db_request.registry.datadog.histogram.calls == [
pretend.call("warehouse.views.search.results", 1000)
]
@pytest.mark.parametrize(
("page", "order", "expected"),
[
(None, None, []),
(1, "-created", [{"created": {"order": "desc", "unmapped_type": "long"}}]),
(5, "created", [{"created": {"unmapped_type": "long"}}]),
],
)
def test_with_an_ordering(self, monkeypatch, db_request, page, order, expected):
params = MultiDict({"q": "foo bar"})
if page is not None:
params["page"] = page
if order is not None:
params["o"] = order
db_request.params = params
sort = pretend.stub()
suggest = pretend.stub(sort=pretend.call_recorder(lambda *a, **kw: sort))
es_query = pretend.stub(suggest=pretend.call_recorder(lambda *a, **kw: suggest))
db_request.es = pretend.stub(
query=pretend.call_recorder(lambda *a, **kw: es_query)
)
page_obj = pretend.stub(page_count=(page or 1) + 10, item_count=1000)
page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
monkeypatch.setattr(views, "ElasticsearchPage", page_cls)
url_maker = pretend.stub()
url_maker_factory = pretend.call_recorder(lambda request: url_maker)
monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
assert search(db_request) == {
"page": page_obj,
"term": params.get("q", ""),
"order": params.get("o", ""),
"applied_filters": [],
"available_filters": [],
}
assert page_cls.calls == [
pretend.call(
sort if order is not None else suggest,
url_maker=url_maker,
page=page or 1,
)
]
assert url_maker_factory.calls == [pretend.call(db_request)]
assert db_request.es.query.calls == [
pretend.call(views.gather_es_queries(params["q"]))
]
assert es_query.suggest.calls == [
pretend.call("name_suggestion", params["q"], term={"field": "name"})
]
assert suggest.sort.calls == [pretend.call(i) for i in expected]
@pytest.mark.parametrize("page", [None, 1, 5])
def test_with_classifiers(self, monkeypatch, db_request, page):
params = MultiDict([("q", "foo bar"), ("c", "foo :: bar"), ("c", "fiz :: buz")])
if page is not None:
params["page"] = page
db_request.params = params
es_query = pretend.stub(
suggest=pretend.call_recorder(lambda *a, **kw: es_query),
filter=pretend.call_recorder(lambda *a, **kw: es_query),
sort=pretend.call_recorder(lambda *a, **kw: es_query),
)
db_request.es = pretend.stub(
query=pretend.call_recorder(lambda *a, **kw: es_query)
)
classifier1 = ClassifierFactory.create(classifier="foo :: bar")
classifier2 = ClassifierFactory.create(classifier="foo :: baz")
classifier3 = ClassifierFactory.create(classifier="fiz :: buz")
project = ProjectFactory.create()
release1 = ReleaseFactory.create(project=project)
release1.created = datetime.date(2011, 1, 1)
release1._classifiers.append(classifier1)
release1._classifiers.append(classifier2)
page_obj = pretend.stub(page_count=(page or 1) + 10, item_count=1000)
page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
monkeypatch.setattr(views, "ElasticsearchPage", page_cls)
url_maker = pretend.stub()
url_maker_factory = pretend.call_recorder(lambda request: url_maker)
monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
search_view = search(db_request)
assert search_view == {
"page": page_obj,
"term": params.get("q", ""),
"order": params.get("o", ""),
"applied_filters": params.getall("c"),
"available_filters": [
("foo", [classifier1.classifier, classifier2.classifier])
],
}
assert ("fiz", [classifier3.classifier]) not in search_view["available_filters"]
assert page_cls.calls == [
pretend.call(es_query, url_maker=url_maker, page=page or 1)
]
assert url_maker_factory.calls == [pretend.call(db_request)]
assert db_request.es.query.calls == [
pretend.call(views.gather_es_queries(params["q"]))
]
assert es_query.suggest.calls == [
pretend.call("name_suggestion", params["q"], term={"field": "name"})
]
assert es_query.filter.calls == [
pretend.call("terms", classifiers=["foo :: bar"]),
pretend.call("terms", classifiers=["fiz :: buz"]),
]
@pytest.mark.parametrize("page", [None, 1, 5])
def test_without_a_query(self, monkeypatch, db_request, page):
params = MultiDict()
if page is not None:
params["page"] = page
db_request.params = params
es_query = pretend.stub()
db_request.es = pretend.stub(query=lambda *a, **kw: es_query)
page_obj = pretend.stub(page_count=(page or 1) + 10, item_count=1000)
page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
monkeypatch.setattr(views, "ElasticsearchPage", page_cls)
url_maker = pretend.stub()
url_maker_factory = pretend.call_recorder(lambda request: url_maker)
monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
assert search(db_request) == {
"page": page_obj,
"term": params.get("q", ""),
"order": params.get("o", ""),
"applied_filters": [],
"available_filters": [],
}
assert page_cls.calls == [
pretend.call(es_query, url_maker=url_maker, page=page or 1)
]
assert url_maker_factory.calls == [pretend.call(db_request)]
def test_returns_404_with_pagenum_too_high(self, monkeypatch, db_request):
params = MultiDict({"page": 15})
db_request.params = params
es_query = pretend.stub()
db_request.es = pretend.stub(query=lambda *a, **kw: es_query)
page_obj = pretend.stub(page_count=10, item_count=1000)
page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
monkeypatch.setattr(views, "ElasticsearchPage", page_cls)
url_maker = pretend.stub()
url_maker_factory = pretend.call_recorder(lambda request: url_maker)
monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
resp = search(db_request)
assert isinstance(resp, HTTPNotFound)
assert page_cls.calls == [
            pretend.call(es_query, url_maker=url_maker, page=15)
]
assert url_maker_factory.calls == [pretend.call(db_request)]
def test_raises_400_with_pagenum_type_str(self, monkeypatch, db_request):
params = MultiDict({"page": "abc"})
db_request.params = params
es_query = pretend.stub()
db_request.es = pretend.stub(query=lambda *a, **kw: es_query)
page_obj = pretend.stub(page_count=10, item_count=1000)
page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
monkeypatch.setattr(views, "ElasticsearchPage", page_cls)
url_maker = pretend.stub()
url_maker_factory = pretend.call_recorder(lambda request: url_maker)
monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
with pytest.raises(HTTPBadRequest):
search(db_request)
assert page_cls.calls == []
def test_classifiers(db_request):
classifier_a = ClassifierFactory(classifier="I am first")
classifier_b = ClassifierFactory(classifier="I am last")
assert classifiers(db_request) == {
"classifiers": [(classifier_a.classifier,), (classifier_b.classifier,)]
}
def test_health():
request = pretend.stub(
db=pretend.stub(execute=pretend.call_recorder(lambda q: None))
)
assert health(request) == "OK"
assert request.db.execute.calls == [pretend.call("SELECT 1")]
class TestForceStatus:
def test_valid(self):
with pytest.raises(HTTPBadRequest):
force_status(pretend.stub(matchdict={"status": "400"}))
def test_invalid(self):
with pytest.raises(HTTPNotFound):
force_status(pretend.stub(matchdict={"status": "599"}))
|
the-stack_106_15749
|
import datetime
from django.contrib.auth.models import User
from django.forms import ModelForm, DateInput, forms, CheckboxSelectMultiple
from .models import LeaveRegistration, Entitlement
class LeaveRegistrationForm(ModelForm):
required_css_class = 'required'
class Meta:
model = LeaveRegistration
fields = [
'from_date', 'end_date', 'amount_of_hours'
]
widgets = {
'from_date': DateInput(attrs={'type': 'date'}),
'end_date': DateInput(attrs={'type': 'date'})
}
def __init__(self, years, *args, **kwargs):
self.years = years
super(LeaveRegistrationForm, self).__init__(*args, **kwargs)
def clean(self):
from_date = self.cleaned_data.get('from_date')
end_date = self.cleaned_data.get('end_date')
if not isinstance(from_date, datetime.date) or not isinstance(end_date, datetime.date):
raise forms.ValidationError("Vul een geldige datum in.")
from_year = from_date.year
end_year = end_date.year
if from_year != end_year:
raise forms.ValidationError(
"Je kan voor 1 kalenderjaar tegelijk invullen. Zorg dat begin- en einddatum in het zelfde jaar liggen.")
if end_date < from_date:
raise forms.ValidationError("De einddatum ligt voor de begindatum")
if from_year not in self.years:
raise forms.ValidationError("Dit jaar is (nog) niet beschikbaar")
return self.cleaned_data
class UserForm(ModelForm):
required_css_class = 'required'
class Meta:
model = User
fields = [
'username', 'first_name', 'last_name', 'email', 'is_active', 'groups'
]
widgets = {
'groups': CheckboxSelectMultiple()
}
class EntitlementForm(ModelForm):
required_css_class = 'required'
class Meta:
model = Entitlement
fields = [
'year', 'leave_hours'
]
def __init__(self, *args, **kwargs):
self.years = kwargs.pop('years')
super(EntitlementForm, self).__init__(*args, **kwargs)
def clean(self):
year = self.cleaned_data.get('year')
if year in self.years:
raise forms.ValidationError(
"Er zijn al verlofuren voor dit jaar ingevuld.")
return self.cleaned_data
class AdminEntitlementForm(ModelForm):
required_css_class = 'required'
class Meta:
model = Entitlement
fields = [
'leave_hours'
]
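# Minimal usage sketch (illustrative only; assumes a configured Django project
# with the models imported above):
#
#     form = LeaveRegistrationForm(
#         [2024],                       # `years`: the extra positional argument
#         data={'from_date': '2024-07-01', 'end_date': '2024-07-05',
#               'amount_of_hours': 16},
#     )
#     form.is_valid()   # runs clean(): same-year, date-order and known-year checks
#
#     EntitlementForm(data={'year': 2024, 'leave_hours': 200}, years=[2023])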
|
the-stack_106_15750
|
import sys
import argparse
sys.path.append('./src/')
from h02_learn.dataset import get_data_loaders
from h02_learn.model import BiaffineParser
from h02_learn.train import evaluate
from utils import constants
def get_args():
parser = argparse.ArgumentParser()
# Data
parser.add_argument('--language', type=str, required=True)
parser.add_argument('--data-path', type=str, default='data/')
parser.add_argument('--batch-size', type=int, default=128)
# Model
parser.add_argument('--checkpoints-path', type=str, default='checkpoints/')
return parser.parse_args()
def load_model(checkpoints_path, language):
load_path = '%s/%s/' % (checkpoints_path, language)
return BiaffineParser.load(load_path).to(device=constants.device)
def main():
# pylint: disable=too-many-locals
args = get_args()
trainloader, devloader, testloader, _, _ = \
get_data_loaders(args.data_path, args.language, args.batch_size, args.batch_size)
print('Train size: %d Dev size: %d Test size: %d' %
(len(trainloader.dataset), len(devloader.dataset), len(testloader.dataset)))
model = load_model(args.checkpoints_path, args.language)
train_loss, train_las, train_uas = evaluate(trainloader, model)
dev_loss, dev_las, dev_uas = evaluate(devloader, model)
test_loss, test_las, test_uas = evaluate(testloader, model)
print('Final Training loss: %.4f Dev loss: %.4f Test loss: %.4f' %
(train_loss, dev_loss, test_loss))
print('Final Training las: %.4f Dev las: %.4f Test las: %.4f' %
(train_las, dev_las, test_las))
print('Final Training uas: %.4f Dev uas: %.4f Test uas: %.4f' %
(train_uas, dev_uas, test_uas))
if __name__ == '__main__':
main()
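# Example invocation (the script path and language value are placeholders;
# adjust them to the repository layout and available data):
#
#     python src/h03_eval/eval.py --language english --data-path data/ \
#         --batch-size 128 --checkpoints-path checkpoints/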
|
the-stack_106_15751
|
# Encoding: UTF-8
import pytest
parametrize = pytest.mark.parametrize
@parametrize(
('input', 'table', 'id'),
[
# Simple lookups
(u'Eevee', 'pokemon_species',133),
(u'Scratch', 'moves', 10),
(u'Master Ball', 'items', 1),
(u'normal', 'types', 1),
(u'Run Away', 'abilities', 50),
# Funny characters
(u'Mr. Mime', 'pokemon_species', 122),
(u"Farfetch’d", 'pokemon_species', 83),
(u'Poké Ball', 'items', 4),
# Forms
(u'Rotom', 'pokemon_species', 479),
(u'Wash Rotom', 'pokemon_forms', 10059),
(u'East Shellos', 'pokemon_forms', 10039),
# Other languages
(u'イーブイ', 'pokemon_species', 133),
(u'Iibui', 'pokemon_species', 133),
(u'Eievui', 'pokemon_species', 133),
(u'이브이', 'pokemon_species', 133),
(u'伊布', 'pokemon_species', 133),
(u'Evoli', 'pokemon_species', 133),
]
)
def test_exact_lookup(lookup, input, table, id):
results = lookup.lookup(input)
assert len(results) == 1
assert results[0].exact == True
row = results[0].object
assert row.__tablename__ == table
assert row.id == id
def test_id_lookup(lookup):
results = lookup.lookup(u'1')
assert len(results) >= 5
assert all(result.object.id == 1 for result in results)
def test_multi_lookup(lookup):
results = lookup.lookup(u'Metronome')
assert len(results) == 2
assert results[0].exact
def test_type_lookup(lookup):
results = lookup.lookup(u'pokemon:1')
assert results[0].object.__tablename__ == 'pokemon_species'
assert len(results) == 1
assert results[0].object.name == u'Bulbasaur'
results = lookup.lookup(u'1', valid_types=['pokemon_species'])
assert results[0].object.name == u'Bulbasaur'
def test_language_lookup(lookup):
# There are two objects named "charge": the move Charge, and the move
# Tackle, which is called "Charge" in French.
results = lookup.lookup(u'charge')
assert len(results) > 1
results = lookup.lookup(u'@fr:charge')
assert results[0].iso639 == u'fr'
assert len(results) == 1
assert results[0].object.name == u'Tackle'
results = lookup.lookup(u'charge', valid_types=['@fr'])
assert results[0].object.name == u'Tackle'
results = lookup.lookup(u'@fr,move:charge')
assert results[0].object.name == u'Tackle'
results = lookup.lookup(u'@fr:charge', valid_types=['move'])
    assert results[0].object.name == u'Tackle'
@parametrize(
('misspelling', 'name'),
[
# Regular English names
(u'chamander', u'Charmander'),
(u'pokeball', u'Poké Ball'),
# Names with squiggles in them
(u'farfetchd', u"Farfetch’d"),
(u'porygonz', u'Porygon-Z'),
# Sufficiently long foreign names
(u'カクレオ', u'Kecleon'),
(u'Yamikrasu', u'Murkrow'),
]
)
def test_fuzzy_lookup(lookup, misspelling, name):
results = lookup.lookup(misspelling)
first_result = results[0]
assert first_result.object.name == name
def test_nidoran(lookup):
results = lookup.lookup(u'Nidoran')
top_names = [result.object.name for result in results[0:2]]
assert u'Nidoran♂' in top_names
assert u'Nidoran♀' in top_names
@parametrize(
('wildcard', 'name'),
[
(u'pokemon:*meleon', u'Charmeleon'),
(u'item:master*', u'Master Ball'),
(u'ee?ee', u'Eevee'),
]
)
def test_wildcard_lookup(lookup, wildcard, name):
results = lookup.lookup(wildcard)
first_result = results[0]
assert first_result.object.name == name
def test_bare_random(lookup):
for i in range(5):
results = lookup.lookup(u'random')
assert len(results) == 1
@parametrize(
'table_name',
[
u'pokemon_species',
u'moves',
u'items',
u'abilities',
u'types'
]
)
def test_qualified_random(lookup, table_name):
results = lookup.lookup(u'random', valid_types=[table_name])
assert len(results) == 1
assert results[0].object.__tablename__ == table_name
def test_crash_empty_prefix(lookup):
"""Searching for ':foo' used to crash, augh!"""
results = lookup.lookup(u':Eevee')
assert results[0].object.name == u'Eevee'
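# Note: the `lookup` fixture used throughout this module is not defined here;
# it is assumed to be provided by a conftest.py that builds the pokedex lookup
# index over a populated database session.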
|
the-stack_106_15752
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "choroplethmapbox"
_path_str = "choroplethmapbox.stream"
_valid_props = {"maxpoints", "token"}
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.choroplethmapbox.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.choroplethmapbox.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choroplethmapbox.Stream`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
_v = maxpoints if maxpoints is not None else _v
if _v is not None:
self["maxpoints"] = _v
_v = arg.pop("token", None)
_v = token if token is not None else _v
if _v is not None:
self["token"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
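# Minimal usage sketch (illustrative only; the token below is a placeholder,
# not a real streaming token):
#
#     stream = Stream(maxpoints=50, token="placeholder-token")
#     stream.maxpoints          # -> 50
#     stream.to_plotly_json()   # -> {'maxpoints': 50, 'token': 'placeholder-token'}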
|
the-stack_106_15753
|
import copy
import json
import os
from urllib.parse import parse_qs
from urllib.parse import urlparse
import pytest
import responses
from cryptojwt import as_unicode
from cryptojwt import b64d
from cryptojwt.key_jar import build_keyjar
from cryptojwt.utils import as_bytes
from oidcmsg.exception import InvalidRequest
from oidcmsg.message import Message
from oidcmsg.oidc import AuthorizationRequest
from oidcmsg.oidc import verified_claim_name
from oidcmsg.oidc import verify_id_token
from oidcmsg.time_util import time_sans_frac
from oidcendpoint.authn_event import create_authn_event
from oidcendpoint.cookie import CookieDealer
from oidcendpoint.cookie import new_cookie
from oidcendpoint.endpoint_context import EndpointContext
from oidcendpoint.exception import RedirectURIError
from oidcendpoint.oauth2.authorization import join_query
from oidcendpoint.oidc import userinfo
from oidcendpoint.oidc.authorization import Authorization
from oidcendpoint.oidc.provider_config import ProviderConfiguration
from oidcendpoint.oidc.registration import Registration
from oidcendpoint.oidc.session import Session
from oidcendpoint.oidc.session import do_front_channel_logout_iframe
from oidcendpoint.oidc.token import Token
from oidcendpoint.session import session_key
from oidcendpoint.session import unpack_session_key
from oidcendpoint.session.grant import Grant
from oidcendpoint.user_authn.authn_context import INTERNETPROTOCOLPASSWORD
from oidcendpoint.user_info import UserInfo
ISS = "https://example.com/"
CLI1 = "https://client1.example.com/"
CLI2 = "https://client2.example.com/"
KEYDEFS = [
{"type": "RSA", "use": ["sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"]},
]
KEYJAR = build_keyjar(KEYDEFS)
KEYJAR.import_jwks(KEYJAR.export_jwks(private=True), ISS)
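# The OP's own private keys are also registered under its issuer id so that
# tokens signed by the endpoints can be verified locally in these tests.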
RESPONSE_TYPES_SUPPORTED = [
["code"],
["token"],
["id_token"],
["code", "token"],
["code", "id_token"],
["id_token", "token"],
["code", "token", "id_token"],
["none"],
]
CAPABILITIES = {
"response_types_supported": [" ".join(x) for x in RESPONSE_TYPES_SUPPORTED],
"token_endpoint_auth_methods_supported": [
"client_secret_post",
"client_secret_basic",
"client_secret_jwt",
"private_key_jwt",
],
"response_modes_supported": ["query", "fragment", "form_post"],
"subject_types_supported": ["public", "pairwise", "ephemeral"],
"grant_types_supported": [
"authorization_code",
"implicit",
"urn:ietf:params:oauth:grant-type:jwt-bearer",
"refresh_token",
],
"claim_types_supported": ["normal", "aggregated", "distributed"],
"claims_parameter_supported": True,
"request_parameter_supported": True,
"request_uri_parameter_supported": True,
}
AUTH_REQ = AuthorizationRequest(
client_id="client_1",
redirect_uri="{}cb".format(ISS),
scope=["openid"],
state="STATE",
response_type="code",
client_secret="hemligt",
)
AUTH_REQ_DICT = AUTH_REQ.to_dict()
BASEDIR = os.path.abspath(os.path.dirname(__file__))
def full_path(local_file):
return os.path.join(BASEDIR, local_file)
USERINFO_db = json.loads(open(full_path("users.json")).read())
class TestEndpoint(object):
@pytest.fixture(autouse=True)
def create_endpoint(self):
conf = {
"issuer": ISS,
"password": "mycket hemlig zebra",
"token_expires_in": 600,
"grant_expires_in": 300,
"refresh_token_expires_in": 86400,
"verify_ssl": False,
"capabilities": CAPABILITIES,
"keys": {"uri_path": "jwks.json", "key_defs": KEYDEFS},
"endpoint": {
"provider_config": {
"path": "{}/.well-known/openid-configuration",
"class": ProviderConfiguration,
"kwargs": {"client_authn_method": None},
},
"registration": {
"path": "{}/registration",
"class": Registration,
"kwargs": {"client_authn_method": None},
},
"authorization": {
"path": "{}/authorization",
"class": Authorization,
"kwargs": {"client_authn_method": None},
},
"token": {"path": "{}/token", "class": Token, "kwargs": {}},
"userinfo": {
"path": "{}/userinfo",
"class": userinfo.UserInfo,
"kwargs": {"db_file": "users.json"},
},
"session": {
"path": "{}/end_session",
"class": Session,
"kwargs": {
"post_logout_uri_path": "post_logout",
"signing_alg": "ES256",
"logout_verify_url": "{}/verify_logout".format(ISS),
"client_authn_method": None,
},
},
},
"authentication": {
"anon": {
"acr": INTERNETPROTOCOLPASSWORD,
"class": "oidcendpoint.user_authn.user.NoAuthn",
"kwargs": {"user": "diana"},
}
},
"userinfo": {"class": UserInfo, "kwargs": {"db": USERINFO_db}},
"template_dir": "template",
# 'cookie_name':{
# 'session': 'oidcop',
# 'register': 'oidcreg'
# }
}
cookie_conf = {
"sign_key": "ghsNKDDLshZTPn974nOsIGhedULrsqnsGoBFBLwUKuJhE2ch",
"default_values": {
"name": "oidcop",
"domain": "127.0.0.1",
"path": "/",
"max_age": 3600,
},
}
self.cd = CookieDealer(**cookie_conf)
endpoint_context = EndpointContext(conf, cookie_dealer=self.cd, keyjar=KEYJAR)
endpoint_context.cdb = {
"client_1": {
"client_secret": "hemligt",
"redirect_uris": [("{}cb".format(CLI1), None)],
"client_salt": "salted",
"token_endpoint_auth_method": "client_secret_post",
"response_types": ["code", "token", "code id_token", "id_token"],
"post_logout_redirect_uris": [("{}logout_cb".format(CLI1), "")],
},
"client_2": {
"client_secret": "hemligare",
"redirect_uris": [("{}cb".format(CLI2), None)],
"client_salt": "saltare",
"token_endpoint_auth_method": "client_secret_post",
"response_types": ["code", "token", "code id_token", "id_token"],
"post_logout_redirect_uris": [("{}logout_cb".format(CLI2), "")],
},
}
self.session_manager = endpoint_context.session_manager
self.authn_endpoint = endpoint_context.endpoint["authorization"]
self.session_endpoint = endpoint_context.endpoint["session"]
self.token_endpoint = endpoint_context.endpoint["token"]
self.user_id = "diana"
def test_end_session_endpoint(self):
# End session not allowed if no cookie and no id_token_hint is sent
# (can't determine session)
with pytest.raises(ValueError):
_ = self.session_endpoint.process_request("", cookie="FAIL")
def _create_cookie(self, session_id):
ec = self.session_endpoint.endpoint_context
return new_cookie(
ec,
sid=session_id,
cookie_name=ec.cookie_name["session"],
)
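    # Helper: wraps the session id in the OP's "session" cookie (protected with
    # the sign_key configured above), mimicking what a browser would send back
    # after authentication.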
def _code_auth(self, state):
req = AuthorizationRequest(
state=state,
response_type="code",
redirect_uri="{}cb".format(CLI1),
scope=["openid"],
client_id="client_1",
)
_pr_resp = self.authn_endpoint.parse_request(req.to_dict())
return self.authn_endpoint.process_request(_pr_resp)
def _code_auth2(self, state):
req = AuthorizationRequest(
state=state,
response_type="code",
redirect_uri="{}cb".format(CLI2),
scope=["openid"],
client_id="client_2",
)
_pr_resp = self.authn_endpoint.parse_request(req.to_dict())
return self.authn_endpoint.process_request(_pr_resp)
def _auth_with_id_token(self, state):
req = AuthorizationRequest(
state=state,
response_type="id_token",
redirect_uri="{}cb".format(CLI1),
scope=["openid"],
client_id="client_1",
nonce="_nonce_",
)
_pr_resp = self.authn_endpoint.parse_request(req.to_dict())
_resp = self.authn_endpoint.process_request(_pr_resp)
part = self.session_endpoint.endpoint_context.cookie_dealer.get_cookie_value(
_resp["cookie"][0], cookie_name="oidcop"
)
# value is a base64 encoded JSON document
_cookie_info = json.loads(as_unicode(b64d(as_bytes(part[0]))))
return _resp["response_args"], _cookie_info["sid"]
def test_end_session_endpoint_with_cookie(self):
_resp = self._code_auth("1234567")
_code = _resp["response_args"]["code"]
_session_info = self.session_manager.get_session_info_by_token(_code)
cookie = self._create_cookie(_session_info["session_id"])
_req_args = self.session_endpoint.parse_request({"state": "1234567"})
resp = self.session_endpoint.process_request(_req_args, cookie=cookie)
# returns a signed JWT to be put in a verification web page shown to
# the user
p = urlparse(resp["redirect_location"])
qs = parse_qs(p.query)
jwt_info = self.session_endpoint.unpack_signed_jwt(qs["sjwt"][0])
assert jwt_info["sid"] == _session_info["session_id"]
assert jwt_info["redirect_uri"] == "https://example.com/post_logout"
def test_end_session_endpoint_with_cookie_and_unknown_sid(self):
# Need cookie and ID Token to figure this out
resp_args, _session_id = self._auth_with_id_token("1234567")
id_token = resp_args["id_token"]
_uid, _cid, _gid = unpack_session_key(_session_id)
cookie = self._create_cookie(session_key(_uid, "client_66", _gid))
with pytest.raises(ValueError):
self.session_endpoint.process_request({"state": "foo"}, cookie=cookie)
def test_end_session_endpoint_with_cookie_id_token_and_unknown_sid(self):
# Need cookie and ID Token to figure this out
resp_args, _session_id = self._auth_with_id_token("1234567")
id_token = resp_args["id_token"]
_uid, _cid, _gid = unpack_session_key(_session_id)
cookie = self._create_cookie(session_key(_uid, "client_66", _gid))
msg = Message(id_token=id_token)
verify_id_token(msg, keyjar=self.session_endpoint.endpoint_context.keyjar)
msg2 = Message(id_token_hint=id_token)
msg2[verified_claim_name("id_token_hint")] = msg[
verified_claim_name("id_token")
]
with pytest.raises(ValueError):
self.session_endpoint.process_request(msg2, cookie=cookie)
def test_end_session_endpoint_with_cookie_dual_login(self):
_resp = self._code_auth("1234567")
self._code_auth2("abcdefg")
_code = _resp["response_args"]["code"]
_session_info = self.session_manager.get_session_info_by_token(_code)
cookie = self._create_cookie(_session_info["session_id"])
resp = self.session_endpoint.process_request({"state": "abcde"}, cookie=cookie)
# returns a signed JWT to be put in a verification web page shown to
# the user
p = urlparse(resp["redirect_location"])
qs = parse_qs(p.query)
jwt_info = self.session_endpoint.unpack_signed_jwt(qs["sjwt"][0])
assert jwt_info["sid"] == _session_info["session_id"]
assert jwt_info["redirect_uri"] == "https://example.com/post_logout"
def test_end_session_endpoint_with_post_logout_redirect_uri(self):
_resp = self._code_auth("1234567")
self._code_auth2("abcdefg")
_code = _resp["response_args"]["code"]
_session_info = self.session_manager.get_session_info_by_token(_code)
cookie = self._create_cookie(_session_info["session_id"])
post_logout_redirect_uri = join_query(
*self.session_endpoint.endpoint_context.cdb["client_1"][
"post_logout_redirect_uris"
][0]
)
with pytest.raises(InvalidRequest):
self.session_endpoint.process_request(
{
"post_logout_redirect_uri": post_logout_redirect_uri,
"state": "abcde",
},
cookie=cookie,
)
def test_end_session_endpoint_with_wrong_post_logout_redirect_uri(self):
_resp = self._code_auth("1234567")
self._code_auth2("abcdefg")
resp_args, _session_id = self._auth_with_id_token("1234567")
id_token = resp_args["id_token"]
cookie = self._create_cookie(_session_id)
post_logout_redirect_uri = "https://demo.example.com/log_out"
msg = Message(id_token=id_token)
verify_id_token(msg, keyjar=self.session_endpoint.endpoint_context.keyjar)
with pytest.raises(RedirectURIError):
self.session_endpoint.process_request(
{
"post_logout_redirect_uri": post_logout_redirect_uri,
"state": "abcde",
"id_token_hint": id_token,
verified_claim_name("id_token_hint"): msg[
verified_claim_name("id_token")
],
},
cookie=cookie,
)
def test_back_channel_logout_no_uri(self):
self._code_auth("1234567")
res = self.session_endpoint.do_back_channel_logout(
self.session_endpoint.endpoint_context.cdb["client_1"], 0
)
assert res is None
def test_back_channel_logout(self):
self._code_auth("1234567")
_cdb = copy.copy(self.session_endpoint.endpoint_context.cdb["client_1"])
_cdb["backchannel_logout_uri"] = "https://example.com/bc_logout"
_cdb["client_id"] = "client_1"
res = self.session_endpoint.do_back_channel_logout(_cdb, "_sid_")
assert isinstance(res, tuple)
assert res[0] == "https://example.com/bc_logout"
_jwt = self.session_endpoint.unpack_signed_jwt(res[1], "RS256")
assert _jwt
assert _jwt["iss"] == ISS
assert _jwt["aud"] == ["client_1"]
assert "sid" in _jwt
def test_front_channel_logout(self):
self._code_auth("1234567")
_cdb = copy.copy(self.session_endpoint.endpoint_context.cdb["client_1"])
_cdb["frontchannel_logout_uri"] = "https://example.com/fc_logout"
_cdb["client_id"] = "client_1"
res = do_front_channel_logout_iframe(_cdb, ISS, "_sid_")
assert res == '<iframe src="https://example.com/fc_logout">'
def test_front_channel_logout_session_required(self):
self._code_auth("1234567")
_cdb = copy.copy(self.session_endpoint.endpoint_context.cdb["client_1"])
_cdb["frontchannel_logout_uri"] = "https://example.com/fc_logout"
_cdb["frontchannel_logout_session_required"] = True
_cdb["client_id"] = "client_1"
res = do_front_channel_logout_iframe(_cdb, ISS, "_sid_")
test_res = (
'<iframe src="https://example.com/fc_logout?',
"iss=https%3A%2F%2Fexample.com%2F",
"sid=_sid_",
)
for i in test_res:
assert i in res
def test_front_channel_logout_with_query(self):
self._code_auth("1234567")
_cdb = copy.copy(self.session_endpoint.endpoint_context.cdb["client_1"])
_cdb["frontchannel_logout_uri"] = "https://example.com/fc_logout?entity_id=foo"
_cdb["frontchannel_logout_session_required"] = True
_cdb["client_id"] = "client_1"
res = do_front_channel_logout_iframe(_cdb, ISS, "_sid_")
test_res = (
"<iframe",
'src="https://example.com/fc_logout?',
"entity_id=foo",
"iss=https%3A%2F%2Fexample.com%2F",
"sid=_sid_",
)
for i in test_res:
assert i in res
def test_logout_from_client_bc(self):
_resp = self._code_auth("1234567")
_code = _resp["response_args"]["code"]
_session_info = self.session_manager.get_session_info_by_token(
_code, client_session_info=True)
self.session_endpoint.endpoint_context.cdb["client_1"][
"backchannel_logout_uri"
] = "https://example.com/bc_logout"
self.session_endpoint.endpoint_context.cdb["client_1"]["client_id"] = "client_1"
res = self.session_endpoint.logout_from_client(_session_info["session_id"])
assert set(res.keys()) == {"blu"}
assert set(res["blu"].keys()) == {"client_1"}
_spec = res["blu"]["client_1"]
assert _spec[0] == "https://example.com/bc_logout"
_jwt = self.session_endpoint.unpack_signed_jwt(_spec[1], "RS256")
assert _jwt
assert _jwt["iss"] == ISS
assert _jwt["aud"] == ["client_1"]
assert "sid" in _jwt # This session ID is not the same as the session_id mentioned above
_sid = self.session_endpoint._decrypt_sid(_jwt["sid"])
assert _sid == _session_info["session_id"]
assert _session_info["client_session_info"].is_revoked()
def test_logout_from_client_fc(self):
_resp = self._code_auth("1234567")
_code = _resp["response_args"]["code"]
_session_info = self.session_manager.get_session_info_by_token(
_code, client_session_info=True)
# del self.session_endpoint.endpoint_context.cdb['client_1']['backchannel_logout_uri']
self.session_endpoint.endpoint_context.cdb["client_1"][
"frontchannel_logout_uri"
] = "https://example.com/fc_logout"
self.session_endpoint.endpoint_context.cdb["client_1"]["client_id"] = "client_1"
res = self.session_endpoint.logout_from_client(_session_info["session_id"])
assert set(res.keys()) == {"flu"}
assert set(res["flu"].keys()) == {"client_1"}
_spec = res["flu"]["client_1"]
assert _spec == '<iframe src="https://example.com/fc_logout">'
assert _session_info["client_session_info"].is_revoked()
def test_logout_from_client(self):
_resp = self._code_auth("1234567")
_code = _resp["response_args"]["code"]
_session_info = self.session_manager.get_session_info_by_token(
_code, client_session_info=True)
self._code_auth2("abcdefg")
# client0
self.session_endpoint.endpoint_context.cdb["client_1"][
"backchannel_logout_uri"
] = "https://example.com/bc_logout"
self.session_endpoint.endpoint_context.cdb["client_1"]["client_id"] = "client_1"
self.session_endpoint.endpoint_context.cdb["client_2"][
"frontchannel_logout_uri"
] = "https://example.com/fc_logout"
self.session_endpoint.endpoint_context.cdb["client_2"]["client_id"] = "client_2"
res = self.session_endpoint.logout_all_clients(_session_info["session_id"])
assert res
assert set(res.keys()) == {"blu", "flu"}
assert set(res["flu"].keys()) == {"client_2"}
_spec = res["flu"]["client_2"]
assert _spec == '<iframe src="https://example.com/fc_logout">'
assert set(res["blu"].keys()) == {"client_1"}
_spec = res["blu"]["client_1"]
assert _spec[0] == "https://example.com/bc_logout"
_jwt = self.session_endpoint.unpack_signed_jwt(_spec[1], "RS256")
assert _jwt
assert _jwt["iss"] == ISS
assert _jwt["aud"] == ["client_1"]
# both should be revoked
assert _session_info["client_session_info"].is_revoked()
_cinfo = self.session_manager[session_key(self.user_id, "client_2")]
assert _cinfo.is_revoked()
def test_do_verified_logout(self):
with responses.RequestsMock() as rsps:
rsps.add("POST", "https://example.com/bc_logout", body="OK", status=200)
_resp = self._code_auth("1234567")
_code = _resp["response_args"]["code"]
_session_info = self.session_manager.get_session_info_by_token(_code)
_cdb = self.session_endpoint.endpoint_context.cdb
_cdb["client_1"]["backchannel_logout_uri"] = "https://example.com/bc_logout"
_cdb["client_1"]["client_id"] = "client_1"
res = self.session_endpoint.do_verified_logout(_session_info["session_id"])
assert res == []
def test_logout_from_client_unknow_sid(self):
_resp = self._code_auth("1234567")
_code = _resp["response_args"]["code"]
_session_info = self.session_manager.get_session_info_by_token(_code)
self._code_auth2("abcdefg")
_uid, _cid, _gid = unpack_session_key(_session_info["session_id"])
_sid = session_key('babs', _cid, _gid)
with pytest.raises(KeyError):
self.session_endpoint.logout_all_clients(_sid)
def test_logout_from_client_no_session(self):
_resp = self._code_auth("1234567")
_code = _resp["response_args"]["code"]
_session_info = self.session_manager.get_session_info_by_token(_code)
self._code_auth2("abcdefg")
# client0
self.session_endpoint.endpoint_context.cdb["client_1"][
"backchannel_logout_uri"
] = "https://example.com/bc_logout"
self.session_endpoint.endpoint_context.cdb["client_1"]["client_id"] = "client_1"
self.session_endpoint.endpoint_context.cdb["client_2"][
"frontchannel_logout_uri"
] = "https://example.com/fc_logout"
self.session_endpoint.endpoint_context.cdb["client_2"]["client_id"] = "client_2"
_uid, _cid, _gid = unpack_session_key(_session_info["session_id"])
self.session_endpoint.endpoint_context.session_manager.delete([_uid, _cid])
with pytest.raises(ValueError):
self.session_endpoint.logout_all_clients(_session_info["session_id"])
|
the-stack_106_15756
|
import pandas as pd
import math
import sys
def file(filename):
try:
return pd.read_csv(filename)
except IOError:
raise Exception("Datafile doesn't exist\n")
def digit(i):
try:
return float(i)
except ValueError:
raise Exception("Enter numeric Data\n")
def main():
filename = sys.argv[1]
weight = sys.argv[2]
impact = sys.argv[3]
dataset = file(filename)
x = pd.DataFrame(dataset.iloc[:,1:].values)
# weight = input("enter weight")
weight_int = weight.split(",")
for i in range(0,len(weight_int)):
weight_int[i] = digit(weight_int[i])
if(len(weight_int)<len(x.columns)):
raise Exception("Number of weights is less than the number of columns\n")
weight_int = [float(i) for i in weight_int]
total = sum(weight_int)
weight_int = [float(i/total) for i in weight_int]
# impact = input("enter impact")
impact = impact.split(",")
if(len(impact)<len(x.columns)):
raise Exception("Number of impact parameter is less than number of columns\n")
v_plus=[]
v_minus=[]
for column in x.columns:
square = x[column].pow(2).sum()
square = math.sqrt(square)
x[column]=x[column]*weight_int[column]/square
if(impact[column] == '+'):
v_plus.append(x[column].max())
v_minus.append(x[column].min())
elif(impact[column] == '-'):
v_plus.append(x[column].min())
v_minus.append(x[column].max())
row_length = len(x)
p=[]
for i in range(row_length):
a_plus = 0
a_minus = 0
for j in x.columns:
a_plus =a_plus+ (x.iloc[i,j]-v_plus[j])**2
a_minus = a_minus+ (x.iloc[i,j]-v_minus[j])**2
a_plus = math.sqrt(a_plus)
a_minus = math.sqrt(a_minus)
p.append(a_minus/(a_plus + a_minus))
d = pd.DataFrame(p)
d = d.rank(method = 'first', ascending = False)
p = pd.DataFrame(p)
p.columns = ['Performance']
print(p)
#print("Rank of items is \n")
d.columns = ['Rank']
print(d)
#m = d.min()
index = 0
min_element = d.iloc[0,0]
for i in range(1,len(d)):
if(d.iloc[i,0]<min_element):
index = i
min_element = d.iloc[i,0]
print("Best choice is item %d"% (index+1))
|
the-stack_106_15758
|
from typing import Optional, Tuple, List, Dict
import blspy
import clvm
from clvm.EvalError import EvalError
from clvm.casts import int_from_bytes
from clvm.subclass_sexp import BaseSExp
from src.types.condition_var_pair import ConditionVarPair
from src.types.condition_opcodes import ConditionOpcode
from src.types.BLSSignature import BLSSignature, BLSPublicKey
from src.types.coin import Coin
from src.types.program import Program
from src.types.sized_bytes import bytes32
from src.util.ints import uint64
from src.util.errors import Err, ConsensusError
def parse_sexp_to_condition(
sexp: BaseSExp,
) -> Tuple[Optional[Err], Optional[ConditionVarPair]]:
"""
Takes a ChiaLisp sexp and returns a ConditionVarPair.
If it fails, returns an Error
"""
if not sexp.listp():
return Err.SEXP_ERROR, None
items = sexp.as_python()
if not isinstance(items[0], bytes):
return Err.INVALID_CONDITION, None
try:
opcode = ConditionOpcode(items[0])
except ValueError:
return Err.INVALID_CONDITION, None
if len(items) == 3:
return None, ConditionVarPair(opcode, items[1], items[2])
return None, ConditionVarPair(opcode, items[1], None)
def parse_sexp_to_conditions(
sexp: BaseSExp,
) -> Tuple[Optional[Err], Optional[List[ConditionVarPair]]]:
"""
Takes a ChiaLisp sexp (list) and returns the list of ConditionVarPairs
If it fails, returns an Error
"""
results: List[ConditionVarPair] = []
try:
for _ in sexp.as_iter():
error, cvp = parse_sexp_to_condition(_)
if error:
return error, None
results.append(cvp) # type: ignore # noqa
except ConsensusError:
return Err.INVALID_CONDITION, None
return None, results
def conditions_by_opcode(
conditions: List[ConditionVarPair],
) -> Dict[ConditionOpcode, List[ConditionVarPair]]:
"""
Takes a list of ConditionVarPairs (CVP) and returns a dictionary of CVPs keyed by their opcode
"""
d: Dict[ConditionOpcode, List[ConditionVarPair]] = {}
cvp: ConditionVarPair
for cvp in conditions:
if cvp.opcode not in d:
d[cvp.opcode] = list()
d[cvp.opcode].append(cvp)
return d
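# A minimal sketch of how conditions_by_opcode groups parsed conditions
# (assumes the src.types imports at the top of this module resolve; the byte
# values below are placeholders, not real puzzle hashes or keys):
#
#     cvps = [
#         ConditionVarPair(ConditionOpcode.CREATE_COIN, b"puzzle_hash", b"\x64"),
#         ConditionVarPair(ConditionOpcode.AGG_SIG, b"pubkey", b"message"),
#     ]
#     grouped = conditions_by_opcode(cvps)
#     # grouped maps each ConditionOpcode to the list of ConditionVarPairs using it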
def hash_key_pairs_for_conditions_dict(
conditions_dict: Dict[ConditionOpcode, List[ConditionVarPair]], coin_name: bytes32
) -> List[BLSSignature.PkMessagePair]:
pairs: List[BLSSignature.PkMessagePair] = []
for cvp in conditions_dict.get(ConditionOpcode.AGG_SIG, []):
# TODO: check types
# assert len(_) == 3
blspubkey: BLSPublicKey = BLSPublicKey(cvp.var1)
message: bytes32 = bytes32(blspy.Util.hash256(cvp.var2))
pairs.append(BLSSignature.PkMessagePair(blspubkey, message))
for cvp in conditions_dict.get(ConditionOpcode.AGG_SIG_ME, []):
aggsigme_blspubkey: BLSPublicKey = BLSPublicKey(cvp.var1)
aggsigme_message: bytes32 = bytes32(blspy.Util.hash256(cvp.var2 + coin_name))
pairs.append(BLSSignature.PkMessagePair(aggsigme_blspubkey, aggsigme_message))
return pairs
def aggsig_in_conditions_dict(
conditions_dict: Dict[ConditionOpcode, List[ConditionVarPair]]
) -> List[ConditionVarPair]:
agg_sig_conditions = []
for _ in conditions_dict.get(ConditionOpcode.AGG_SIG, []):
agg_sig_conditions.append(_)
return agg_sig_conditions
def created_outputs_for_conditions_dict(
conditions_dict: Dict[ConditionOpcode, List[ConditionVarPair]],
input_coin_name: bytes32,
) -> List[Coin]:
output_coins = []
for cvp in conditions_dict.get(ConditionOpcode.CREATE_COIN, []):
# TODO: check condition very carefully
# (ensure there are the correct number and type of parameters)
# maybe write a type-checking framework for conditions
# and don't just fail with asserts
puzzle_hash, amount_bin = cvp.var1, cvp.var2
amount = int_from_bytes(amount_bin)
coin = Coin(input_coin_name, puzzle_hash, amount)
output_coins.append(coin)
return output_coins
def conditions_dict_for_solution(
solution,
) -> Tuple[
Optional[Err], Optional[Dict[ConditionOpcode, List[ConditionVarPair]]], uint64
]:
error, result, cost = conditions_for_solution(solution)
if error or result is None:
return error, None, uint64(0)
return None, conditions_by_opcode(result), cost
def conditions_for_solution(
solution_program, run_program=clvm.run_program
) -> Tuple[Optional[Err], Optional[List[ConditionVarPair]], uint64]:
# get the standard script for a puzzle hash and feed in the solution
args = Program.to(solution_program)
try:
puzzle_sexp = args.first()
solution_sexp = args.rest().first()
cost, r = run_program(puzzle_sexp, solution_sexp)
error, result = parse_sexp_to_conditions(r)
return error, result, cost
except EvalError:
return Err.SEXP_ERROR, None, uint64(0)
|
the-stack_106_15759
|
# Copyright (c) 2005 Gavin E. Crooks <[email protected]>
#
# This software is distributed under the MIT Open Source License.
# <http://www.opensource.org/licenses/mit-license.html>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""Read a multiple sequence alignment in STOCKHOLM format.
This file format is used by PFAM and HMMER. At present, all annotation
information is ignored.
See:
- http://www.cgb.ki.se/cgb/groups/sonnhammer/Stockholm.html
- HMMER manual
"""
from __future__ import absolute_import, print_function
import re
from . import *
from ..seq import *
from ..utils import *
example = """
# STOCKHOLM 1.0
#=GF ID CBS
#=GF AC PF00571
#=GF DE CBS domain
#=GF AU Bateman A
#=GF CC CBS domains are small intracellular modules mostly found
#=GF CC in 2 or four copies within a protein.
#=GF SQ 67
#=GS O31698/18-71 AC O31698
#=GS O83071/192-246 AC O83071
#=GS O83071/259-312 AC O83071
#=GS O31698/88-139 AC O31698
#=GS O31698/88-139 OS Bacillus subtilis
O83071/192-246 MTCRAQLIAVPRASSLAE..AIACAQKM....RVSRVPVYERS
#=GR O83071/192-246 SA 999887756453524252..55152525....36463774777
O83071/259-312 MQHVSAPVFVFECTRLAY..VQHKLRAH....SRAVAIVLDEY
#=GR O83071/259-312 SS CCCCCHHHHHHHHHHHHH..EEEEEEEE....EEEEEEEEEEE
O31698/18-71 MIEADKVAHVQVGNNLEH..ALLVLTKT....GYTAIPVLDPS
#=GR O31698/18-71 SS CCCHHHHHHHHHHHHHHH..EEEEEEEE....EEEEEEEEHHH
O31698/88-139 EVMLTDIPRLHINDPIMK..GFGMVINN......GFVCVENDE
#=GR O31698/88-139 SS CCCCCCCHHHHHHHHHHH..HEEEEEEE....EEEEEEEEEEH
#=GC SS_cons CCCCCHHHHHHHHHHHHH..EEEEEEEE....EEEEEEEEEEH
O31699/88-139 EVMLTDIPRLHINDPIMK..GFGMVINN......GFVCVENDE
#=GR O31699/88-139 AS ________________*__________________________
#=GR_O31699/88-139_IN ____________1______________2__________0____
//
"""
names = ("stockholm", "pfam",)
extensions = ('sth', 'stockholm', 'align')
header_line = re.compile(r'#\s+STOCKHOLM\s+1.\d\s+$')
def iterseq(fin, alphabet=None):
"""Iterate over the sequences in the file."""
# Default implementation
return iter(read(fin, alphabet))
def read(fin, alphabet=None):
alphabet = Alphabet(alphabet)
seq_ids = []
seqs = []
block_count = 0
for token in _scan(fin):
if token.typeof == "begin_block":
block_count = 0
elif token.typeof == "seq_id":
if len(seqs) <= block_count:
seq_ids.append(token.data)
seqs.append([])
elif token.typeof == "seq":
if not alphabet.alphabetic(token.data):
raise ValueError(
"Character on line: %d not in alphabet: %s : %s" %
(token.lineno, alphabet, token.data))
seqs[block_count].append(token.data)
block_count += 1
seqs = [Seq("".join(s), alphabet, name=i) for s, i in zip(seqs, seq_ids)]
return SeqList(seqs)
def _scan(fin):
header, body, block = range(3)
yield Token("begin")
state = header
for L, line in enumerate(fin):
if state == header:
if line.isspace():
continue
m = header_line.match(line)
state = body
if m is not None:
# print("header: ", m.group())
yield Token("header", m.group())
continue
else:
raise ValueError("Parse error on line: %d" % L)
if state == body:
if line.isspace():
continue
yield Token("begin_block")
state = block
# fall through to block
if state == block:
if line.isspace():
yield Token("end_block")
state = body
continue
if line.strip() == '//':
yield Token("end_block")
return
if line[0] == '#': # Comment or annotation line
continue
name_seq = line.split(None, 1) # Split into two parts at first whitespace
if len(name_seq) != 2:
raise ValueError("Parse error on line: %d" % L)
yield Token("seq_id", name_seq[0].strip())
yield Token("seq", name_seq[1].strip())
continue
# END state blocks. If I ever get here something has gone terribly wrong
raise RuntimeError()
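# Minimal usage sketch (assumes this package's seq/utils imports resolve; the
# default alphabet is used, so no alphabet argument is passed):
#
#     from io import StringIO
#     seqs = read(StringIO(example))
#     for s in seqs:
#         print(s.name, len(s))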
|
the-stack_106_15760
|
'''
This script identifies drinks marked with aruco markers and calculates their total cost
User variables are to be set in the 'config' file, not within the program
Author: Fasermaler
March 2019
'''
import cv2
import numpy as np
import cv2.aruco as aruco
import csv
# Fire base imports
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
# Time import
import time
# import thread to do multithreading
import _thread
# import argparse to take in system arguments
import argparse
# Custom imports
from aruco_detector import aruco_detector
from picam_class import pi_cam
from video_reader import video_reader
from arg_parser import arg_parser
from price_calculator import price_calculator
from csv_reader import csv_reader
from pull_prices import pull_prices
class cutQ_vision_class:
def __init__(self, drinkp):
# Parses the console arguments
self.args = arg_parser()#default_imshow=False)
self.args.parse_arguments()
# start the csv reader
self.csv_read = csv_reader()
# Get the config file parameters
self.config_read = csv_reader()
print(self.config_read.get_config())
# If no video path was specified, use the pi camera as live feed
if self.args.video_path == None:
self.stream = pi_cam(self.config_read.pi_height, self.config_read.pi_width, self.config_read.pi_fps)
else:
self.stream = video_reader(str(self.args.video_path))
# Start the aruco detector
self.aruco_detector = aruco_detector()
# Start the price calculator
print(drinkp)
self.prices = price_calculator(drinkp)
self.drinks = None
self.price = None
self.drink_dict = {}
# Starts the thread to get frames
_thread.start_new_thread(self.stream.get_frame_continuous, ())
def start(self):
reset_count = 0
# encapsulate the whole program in a try except in case of termination
# try:
while True:
try:
# get the frame from the stream
frame = self.stream.frame
#self.drink_dict = {}
# get the coordinates and ids of the aruco markers
#try:
corners, ids = self.aruco_detector.return_aruco_ids(frame)
if ids is not None:
self.drink_dict = {}
# calculate the prices
self.prices.calculate_price(ids)
# If the user opts to show the cv2 screen
if self.args.imshow:
print(self.prices.drinks_list)
self.aruco_detector.draw_markers(frame, corners, ids, text=self.prices.drinks_list, text_flag=True)
print(self.prices.total_price)
for i in range(len(self.drinks)):
if self.drinks[i] not in self.drink_dict.keys():
if self.drinks[i] != None:
self.drink_dict[self.drinks[i]] = (1, self.prices.pure_prices[self.drinks[i]])
else:
self.drink_dict[self.drinks[i]] = (self.drink_dict[self.drinks[i]][0] + 1 , self.prices.pure_prices[self.drinks[i]])
print(self.drink_dict)
#reset_count = 0
else:
if reset_count == 10:
self.drink_dict = {}
reset_count = 0
else:
reset_count += 1
cv2.imshow('Stream', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# updates the main class attributes
self.price = self.prices.total_price
self.drinks = self.prices.drinks_list
# except:
# #print("skipped a frame")
# pass
except Exception as e: print(e)
# except:
# # terminate the stream
# self.stream.terminate = True
cv2.destroyAllWindows()
## Test Code ##
#vision = cutQ_vision_class()
# Do note that this has no termination condition at the moment
#vision.start()
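## Hedged usage sketch: "drink_prices.csv" is a hypothetical path handed to
## price_calculator; a video file can be used instead of the Pi camera via the
## arguments parsed by arg_parser, and start() loops until 'q' is pressed.
#
# vision = cutQ_vision_class("drink_prices.csv")
# vision.start()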
|
the-stack_106_15761
|
import os
import pygame
from pygame import gfxdraw
import sys
import math
from OpenGL.GL import *
from pygame import OPENGLBLIT
from pygame import OPENGL
from OpenGL.GLU import *
def Round(a):
return int(a + 0.5)
def mainloop():
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
def Draw():
FillPoly((300, 300), 6, 100,[255,0,0],algo="Bresenham")
DrawPoly((300, 300), 6, 100,algo="Bresenham")
pygame.display.flip()
def drawDDA(p1, p2, color=[0, 0, 0]):
x0, y0, x1, y1 = p1[0], p1[1], p2[0], p2[1]
steps = abs(x0-x1) if abs(x0-x1) > abs(y0-y1) else abs(y0-y1)
dx = (x1-x0)/steps
dy = (y1-y0)/steps
x, y = x0, y0
gfxdraw.pixel(screen,Round(x),Round(y),color)
for i in range(int(steps)):
x += dx
y += dy
gfxdraw.pixel(screen,Round(x), Round(y),color)
def drawBresenham(p1, p2, color=[0, 0, 0]):
l = []
x0, y0, x1, y1 = int(p1[0]), int(p1[1]), int(p2[0]), int(p2[1])
dx = x1 - x0
dy = y1 - y0
xsign = 1 if dx > 0 else -1
ysign = 1 if dy > 0 else -1
dx = abs(dx)
dy = abs(dy)
if dx > dy:
xx, xy, yx, yy = xsign, 0, 0, ysign
else:
dx, dy = dy, dx
xx, xy, yx, yy = 0, ysign, xsign, 0
D = 2*dy - dx
y = 0
for x in range(dx + 1):
l.append((x0 + x*xx + y*yx, y0 + x*xy + y*yy))
gfxdraw.pixel(screen,x0 + x*xx + y*yx, y0 + x*xy + y*yy,color)
if D >= 0:
y += 1
D -= 2*dx
D += 2*dy
def DrawPoly(center, n, s, color=[0, 0, 0],algo="DDA"):
x0, y0 = center[0], center[1]
a = math.radians(360 / n)
d = s / 2 / math.sin(a / 2)
pts = []
bv1x = x0-s/2
bv1y = y0 - (s/2)*(1/math.tan(math.radians(180/n)))
bv2x = x0+s/2
bv2y = bv1y
for i in range(n+1):
sideAngle = math.radians((360 * i / n))
x = (bv1x-x0)*math.cos(sideAngle) + (bv1y-y0) * math.sin(sideAngle)+x0
y = (bv1x-x0)*math.sin(sideAngle) - (bv1y-y0) * math.cos(sideAngle)+y0
pts.append([x, y])
for i in range(n):
eval("draw"+algo+"(pts[i], pts[i+1], color)")
def FillPoly(center, n, s, color=[0, 0, 0],algo = "DDA"):
for i in range(1, s):
DrawPoly(center, n, i, color, algo)
size = [640, 720]
os.environ['SDL_VIDEO_CENTERED'] = '0'
pygame.init()
screen = pygame.display.set_mode(size)
screen.fill((255, 255, 255))
Draw()
mainloop()
|
the-stack_106_15764
|
from django.urls import path
from meuOrcamento.views.OrcamentoView import OrcamentoCreateView, OrcamentoUpdate, OrcamentoDelete, CategoriaCreateView, orcamento_view
urlpatterns=[
path('', orcamento_view, name='orcamento'),
path('adicionarCategoria/', CategoriaCreateView.as_view(), name='adicionarCategoria'),
path('adicionarOrcamento/', OrcamentoCreateView.as_view(), name='adicionarOrcamento'),
path('editarOrcamento/<int:pk>', OrcamentoUpdate.as_view(), name='editarOrcamento'),
path('excluirOrcamento/<int:pk>', OrcamentoDelete.as_view(), name='excluirOrcamento'),
]
|
the-stack_106_15765
|
'''Script for downloading all GLUE data.
Note: for legal reasons, we are unable to host MRPC.
You can either use the version hosted by the SentEval team, which is already tokenized,
or you can download the original data from (https://download.microsoft.com/download/D/4/6/D46FF87A-F6B9-4252-AA8B-3604ED519838/MSRParaphraseCorpus.msi) and extract the data from it manually.
For Windows users, you can run the .msi file. For Mac and Linux users, consider an external library such as 'cabextract' (see below for an example).
You should then rename and place specific files in a folder (see below for an example).
mkdir MRPC
cabextract MSRParaphraseCorpus.msi -d MRPC
cat MRPC/_2DEC3DBE877E4DB192D17C0256E90F1D | tr -d $'\r' > MRPC/msr_paraphrase_train.txt
cat MRPC/_D7B391F9EAFF4B1B8BCE8F21B20B1B61 | tr -d $'\r' > MRPC/msr_paraphrase_test.txt
rm MRPC/_*
rm MSRParaphraseCorpus.msi
1/30/19: It looks like SentEval is no longer hosting their extracted and tokenized MRPC data, so you'll need to download the data from the original source for now.
2/11/19: It looks like SentEval actually *is* hosting the extracted data. Hooray!
'''
import os
import sys
import shutil
import argparse
import tempfile
import urllib.request
import zipfile
# TASKS = ["CoLA", "SST", "MRPC", "QQP", "STS", "MNLI", "SNLI", "QNLI", "RTE", "WNLI", "diagnostic"]
# TASK2PATH = {"CoLA":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4',
# "SST":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8',
# "MRPC":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc',
# "QQP":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5',
# "STS":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5',
# "MNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce',
# "SNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df',
# "QNLI": 'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601',
# "RTE":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb',
# "WNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf',
# "diagnostic":'https://storage.googleapis.com/mtl-sentence-representations.appspot.com/tsvsWithoutLabels%2FAX.tsv?GoogleAccessId=firebase-adminsdk-0khhl@mtl-sentence-representations.iam.gserviceaccount.com&Expires=2498860800&Signature=DuQ2CSPt2Yfre0C%2BiISrVYrIFaZH1Lc7hBVZDD4ZyR7fZYOMNOUGpi8QxBmTNOrNPjR3z1cggo7WXFfrgECP6FBJSsURv8Ybrue8Ypt%2FTPxbuJ0Xc2FhDi%2BarnecCBFO77RSbfuz%2Bs95hRrYhTnByqu3U%2FYZPaj3tZt5QdfpH2IUROY8LiBXoXS46LE%2FgOQc%2FKN%2BA9SoscRDYsnxHfG0IjXGwHN%2Bf88q6hOmAxeNPx6moDulUF6XMUAaXCSFU%2BnRO2RDL9CapWxj%2BDl7syNyHhB7987hZ80B%2FwFkQ3MEs8auvt5XW1%2Bd4aCU7ytgM69r8JDCwibfhZxpaa4gd50QXQ%3D%3D'}
MRPC_TRAIN = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt'
MRPC_TEST = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt'
# def download_and_extract(task, data_dir):
# print("Downloading and extracting %s..." % task)
# data_file = "%s.zip" % task
# urllib.request.urlretrieve(TASK2PATH[task], data_file)
# with zipfile.ZipFile(data_file) as zip_ref:
# zip_ref.extractall(data_dir)
# os.remove(data_file)
# print("\tCompleted!")
def format_mrpc(data_dir, path_to_data):
print("Processing MRPC...")
mrpc_dir = os.path.join(data_dir, "MRPC")
if not os.path.isdir(mrpc_dir):
os.mkdir(mrpc_dir)
if path_to_data:
mrpc_train_file = os.path.join(path_to_data, "msr_paraphrase_train.txt")
mrpc_test_file = os.path.join(path_to_data, "msr_paraphrase_test.txt")
else:
print("Local MRPC data not specified, downloading data from %s" % MRPC_TRAIN)
mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt")
mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt")
urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file)
urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file)
assert os.path.isfile(mrpc_train_file), "Train data not found at %s" % mrpc_train_file
assert os.path.isfile(mrpc_test_file), "Test data not found at %s" % mrpc_test_file
# urllib.request.urlretrieve(TASK2PATH["MRPC"], os.path.join(mrpc_dir, "dev_ids.tsv"))
dev_ids = []
with open(os.path.join(mrpc_dir, "dev_ids.tsv"), encoding="utf8") as ids_fh:
for row in ids_fh:
dev_ids.append(row.strip().split('\t'))
with open(mrpc_train_file, encoding="utf8") as data_fh, \
open(os.path.join(mrpc_dir, "train.tsv"), 'w', encoding="utf8") as train_fh, \
open(os.path.join(mrpc_dir, "dev.tsv"), 'w', encoding="utf8") as dev_fh:
header = data_fh.readline()
train_fh.write(header)
dev_fh.write(header)
for row in data_fh:
label, id1, id2, s1, s2 = row.strip().split('\t')
if [id1, id2] in dev_ids:
dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
else:
train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2))
with open(mrpc_test_file, encoding="utf8") as data_fh, \
open(os.path.join(mrpc_dir, "test.tsv"), 'w', encoding="utf8") as test_fh:
header = data_fh.readline()
test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n")
for idx, row in enumerate(data_fh):
label, id1, id2, s1, s2 = row.strip().split('\t')
test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2))
print("\tCompleted!")
# def download_diagnostic(data_dir):
# print("Downloading and extracting diagnostic...")
# if not os.path.isdir(os.path.join(data_dir, "diagnostic")):
# os.mkdir(os.path.join(data_dir, "diagnostic"))
# data_file = os.path.join(data_dir, "diagnostic", "diagnostic.tsv")
# urllib.request.urlretrieve(TASK2PATH["diagnostic"], data_file)
# print("\tCompleted!")
# return
# def get_tasks(task_names):
# task_names = task_names.split(',')
# if "all" in task_names:
# tasks = TASKS
# else:
# tasks = []
# for task_name in task_names:
# assert task_name in TASKS, "Task %s not found!" % task_name
# tasks.append(task_name)
# return tasks
def main(arguments):
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', help='directory to save data to', type=str, default='glue_data')
parser.add_argument('--tasks', help='tasks to download data for as a comma separated string',
type=str, default='all')
parser.add_argument('--path_to_mrpc', help='path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_text.txt',
type=str, default='')
args = parser.parse_args(arguments)
# if not os.path.isdir(args.data_dir):
# os.mkdir(args.data_dir)
# tasks = get_tasks(args.tasks)
#
# for task in tasks:
# if task == 'MRPC':
format_mrpc(args.data_dir, args.path_to_mrpc)
# elif task == 'diagnostic':
# download_diagnostic(args.data_dir)
# else:
# download_and_extract(task, args.data_dir)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
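# Example invocation (a sketch; the script name and paths are placeholders).
# Only the MRPC formatting path is active in this trimmed version: the data_dir
# must already exist, dev_ids.tsv must be present in <data_dir>/MRPC (its
# download is commented out above), and --path_to_mrpc should point at the
# extracted msr_paraphrase_*.txt files:
#
#   python download_glue_data.py --data_dir glue_data --path_to_mrpc ./MRPC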
|
the-stack_106_15766
|
from ....models.models import ListOfSpeakers, Speaker
from ....permissions.permissions import Permissions
from ....shared.patterns import Collection, FullQualifiedId
from ...generics.delete import DeleteAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from ...util.typing import ActionData
@register_action("list_of_speakers.delete_all_speakers")
class ListOfSpeakersDeleteAllSpeakersAction(DeleteAction):
"""
Action to delete all speakers of a list of speakers.
"""
model = Speaker()
schema = DefaultSchema(ListOfSpeakers()).get_default_schema(
required_properties=["id"],
title="Delete all speakers of list of speakers",
description="Action to remove all speakers from the given list of speakers.",
)
permission = Permissions.ListOfSpeakers.CAN_MANAGE
permission_model = ListOfSpeakers()
def get_updated_instances(self, action_data: ActionData) -> ActionData:
for instance in action_data:
list_of_speakers = self.datastore.get(
FullQualifiedId(Collection("list_of_speakers"), instance["id"]),
mapped_fields=["speaker_ids"],
)
if list_of_speakers.get("speaker_ids"):
yield from [
{"id": speaker_id} for speaker_id in list_of_speakers["speaker_ids"]
]
|
the-stack_106_15767
|
import torch
import torch.nn as nn
class AsymmetricLoss(nn.Module):
def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=True):
super(AsymmetricLoss, self).__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.clip = clip
self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
self.eps = eps
def forward(self, x, y):
"""
Parameters
----------
x: input logits
y: targets (multi-label binarized vector)
"""
# Calculating Probabilities
x_sigmoid = torch.sigmoid(x)
xs_pos = x_sigmoid
xs_neg = 1 - x_sigmoid
# Asymmetric Clipping
if self.clip is not None and self.clip > 0:
xs_neg = (xs_neg + self.clip).clamp(max=1)
# Basic CE calculation
los_pos = y * torch.log(xs_pos.clamp(min=self.eps))
los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps))
loss = los_pos + los_neg
# Asymmetric Focusing
if self.gamma_neg > 0 or self.gamma_pos > 0:
if self.disable_torch_grad_focal_loss:
torch._C.set_grad_enabled(False)
pt0 = xs_pos * y
pt1 = xs_neg * (1 - y) # pt = p if t > 0 else 1-p
pt = pt0 + pt1
one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y)
one_sided_w = torch.pow(1 - pt, one_sided_gamma)
if self.disable_torch_grad_focal_loss:
torch._C.set_grad_enabled(True)
loss *= one_sided_w
return -loss.sum()
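# Usage sketch for the multi-label loss above (batch size, label count and the
# hyper-parameters are illustrative values, not prescribed ones):
#
#     criterion = AsymmetricLoss(gamma_neg=4, gamma_pos=1, clip=0.05)
#     logits = torch.randn(8, 20, requires_grad=True)
#     targets = torch.randint(0, 2, (8, 20)).float()   # multi-hot targets
#     loss = criterion(logits, targets)
#     loss.backward()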
class AsymmetricLossOptimized(nn.Module):
''' Notice - optimized version, minimizes memory allocation and gpu uploading,
favors inplace operations'''
def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=False):
super(AsymmetricLossOptimized, self).__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.clip = clip
self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
self.eps = eps
# prevent memory allocation and gpu uploading every iteration, and encourages inplace operations
self.targets = self.anti_targets = self.xs_pos = self.xs_neg = self.asymmetric_w = self.loss = None
def forward(self, x, y):
"""
Parameters
----------
x: input logits
y: targets (multi-label binarized vector)
"""
self.targets = y
self.anti_targets = 1 - y
# Calculating Probabilities
self.xs_pos = torch.sigmoid(x)
self.xs_neg = 1.0 - self.xs_pos
# Asymmetric Clipping
if self.clip is not None and self.clip > 0:
self.xs_neg.add_(self.clip).clamp_(max=1)
# Basic CE calculation
self.loss = self.targets * torch.log(self.xs_pos.clamp(min=self.eps))
self.loss.add_(self.anti_targets * torch.log(self.xs_neg.clamp(min=self.eps)))
# Asymmetric Focusing
if self.gamma_neg > 0 or self.gamma_pos > 0:
if self.disable_torch_grad_focal_loss:
torch._C.set_grad_enabled(False)
self.xs_pos = self.xs_pos * self.targets
self.xs_neg = self.xs_neg * self.anti_targets
self.asymmetric_w = torch.pow(1 - self.xs_pos - self.xs_neg,
self.gamma_pos * self.targets + self.gamma_neg * self.anti_targets)
if self.disable_torch_grad_focal_loss:
torch._C.set_grad_enabled(True)
self.loss *= self.asymmetric_w
return -self.loss.sum()
class ASLSingleLabel(nn.Module):
def __init__(self, gamma_pos=0, gamma_neg=4, eps: float = 0.1, reduction='mean'):
super(ASLSingleLabel, self).__init__()
self.eps = eps
self.logsoftmax = nn.LogSoftmax(dim=-1)
self.targets_classes = [] # prevent gpu repeated memory allocation
self.gamma_pos = gamma_pos
self.gamma_neg = gamma_neg
self.reduction = reduction
def forward(self, inputs, target, reduction=None):
num_classes = inputs.size()[-1]
log_preds = self.logsoftmax(inputs)
self.targets_classes = torch.zeros_like(inputs).scatter_(1, target.long().unsqueeze(1), 1)
# ASL weights
targets = self.targets_classes
anti_targets = 1 - targets
xs_pos = torch.exp(log_preds)
xs_neg = 1 - xs_pos
xs_pos = xs_pos * targets
xs_neg = xs_neg * anti_targets
asymmetric_w = torch.pow(1 - xs_pos - xs_neg,
self.gamma_pos * targets + self.gamma_neg * anti_targets)
log_preds = log_preds * asymmetric_w
if self.eps > 0: # label smoothing
self.targets_classes.mul_(1 - self.eps).add_(self.eps / num_classes)
# loss calculation
loss = - self.targets_classes.mul(log_preds)
loss = loss.sum(dim=-1)
if self.reduction == 'mean':
loss = loss.mean()
return loss
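# Usage sketch for the single-label variant (targets are class indices, and the
# shapes below are illustrative only):
#
#     criterion = ASLSingleLabel(gamma_pos=0, gamma_neg=4, eps=0.1)
#     logits = torch.randn(8, 10, requires_grad=True)
#     classes = torch.randint(0, 10, (8,))
#     loss = criterion(logits, classes)
#     loss.backward()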
|
the-stack_106_15768
|
from decimal import Decimal
from typing import Dict, List, Optional, Tuple, Union
from ..account import events as account_events
from ..account.models import User
from ..order.models import Fulfillment, FulfillmentLine, Order, OrderLine
from ..payment.models import Payment
from . import OrderEvents, OrderEventsEmails
from .models import OrderEvent
UserType = Optional[User]
def _lines_per_quantity_to_line_object_list(quantities_per_order_line):
return [
{"quantity": quantity, "line_pk": line.pk, "item": str(line)}
for quantity, line in quantities_per_order_line
]
def _get_payment_data(amount: Optional[Decimal], payment: Payment) -> Dict:
return {
"parameters": {
"amount": amount,
"payment_id": payment.token,
"payment_gateway": payment.gateway,
}
}
def _user_is_valid(user: UserType) -> bool:
return bool(user and not user.is_anonymous)
def email_sent_event(
*,
order: Order,
user: Optional[UserType],
email_type: str, # use "OrderEventsEmails" class
user_pk: int = None,
) -> OrderEvent:
if user and not user.is_anonymous:
kwargs: Dict[str, Union[User, int]] = {"user": user}
elif user_pk:
kwargs = {"user_id": user_pk}
else:
kwargs = {}
return OrderEvent.objects.create(
order=order,
type=OrderEvents.EMAIL_SENT,
parameters={"email": order.get_customer_email(), "email_type": email_type},
**kwargs,
)
def invoice_requested_event(
*,
order: Order,
user: Optional[UserType],
) -> OrderEvent:
return OrderEvent.objects.create(
order=order, type=OrderEvents.INVOICE_REQUESTED, user=user
)
def invoice_generated_event(
*,
order: Order,
user: Optional[UserType],
invoice_number: str,
) -> OrderEvent:
return OrderEvent.objects.create(
order=order,
type=OrderEvents.INVOICE_GENERATED,
user=user,
parameters={"invoice_number": invoice_number},
)
def invoice_updated_event(
*,
order: Order,
user: Optional[UserType],
invoice_number: str,
url: str,
status: str
) -> OrderEvent:
return OrderEvent.objects.create(
order=order,
type=OrderEvents.INVOICE_UPDATED,
user=user,
parameters={"invoice_number": invoice_number, "url": url, "status": status},
)
def invoice_sent_event(
*,
order: Order,
user: Optional[UserType],
email: str,
) -> OrderEvent:
return OrderEvent.objects.create(
order=order,
type=OrderEvents.INVOICE_SENT,
user=user,
parameters={"email": email},
)
def email_resent_event(
*, order: Order, user: UserType, email_type: OrderEventsEmails
) -> OrderEvent:
raise NotImplementedError
def draft_order_created_event(*, order: Order, user: UserType) -> OrderEvent:
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(
order=order, type=OrderEvents.DRAFT_CREATED, user=user
)
def draft_order_added_products_event(
*, order: Order, user: UserType, order_lines: List[Tuple[int, OrderLine]]
) -> OrderEvent:
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(
order=order,
type=OrderEvents.DRAFT_ADDED_PRODUCTS,
user=user,
parameters={"lines": _lines_per_quantity_to_line_object_list(order_lines)},
)
def draft_order_removed_products_event(
*, order: Order, user: UserType, order_lines: List[Tuple[int, OrderLine]]
) -> OrderEvent:
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(
order=order,
type=OrderEvents.DRAFT_REMOVED_PRODUCTS,
user=user,
parameters={"lines": _lines_per_quantity_to_line_object_list(order_lines)},
)
def order_created_event(
*, order: Order, user: UserType, from_draft=False
) -> OrderEvent:
if from_draft:
event_type = OrderEvents.PLACED_FROM_DRAFT
else:
event_type = OrderEvents.PLACED
account_events.customer_placed_order_event(
user=user, # type: ignore
order=order,
)
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(order=order, type=event_type, user=user)
def order_confirmed_event(
*, order: Order, user: UserType, from_draft=False
) -> OrderEvent:
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(order=order, type=OrderEvents.CONFIRMED, user=user)
def draft_order_oversold_items_event(
*, order: Order, user: UserType, oversold_items: List[str]
) -> OrderEvent:
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(
order=order,
type=OrderEvents.OVERSOLD_ITEMS,
user=user,
parameters={"oversold_items": oversold_items},
)
def order_canceled_event(*, order: Order, user: UserType) -> OrderEvent:
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(order=order, type=OrderEvents.CANCELED, user=user)
def order_manually_marked_as_paid_event(
*, order: Order, user: UserType, transaction_reference: Optional[str] = None
) -> OrderEvent:
if not _user_is_valid(user):
user = None
parameters = {} # type: ignore
if transaction_reference:
parameters = {"transaction_reference": transaction_reference}
return OrderEvent.objects.create(
order=order,
type=OrderEvents.ORDER_MARKED_AS_PAID,
user=user,
parameters=parameters,
)
def order_fully_paid_event(*, order: Order, user: UserType) -> OrderEvent:
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(
order=order, type=OrderEvents.ORDER_FULLY_PAID, user=user
)
def payment_authorized_event(
*, order: Order, user: UserType, amount: Decimal, payment: Payment
) -> OrderEvent:
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(
order=order,
type=OrderEvents.PAYMENT_AUTHORIZED,
user=user,
**_get_payment_data(amount, payment),
)
def payment_captured_event(
*, order: Order, user: UserType, amount: Decimal, payment: Payment
) -> OrderEvent:
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(
order=order,
type=OrderEvents.PAYMENT_CAPTURED,
user=user,
**_get_payment_data(amount, payment),
)
def payment_refunded_event(
*, order: Order, user: UserType, amount: Decimal, payment: Payment
) -> OrderEvent:
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(
order=order,
type=OrderEvents.PAYMENT_REFUNDED,
user=user,
**_get_payment_data(amount, payment),
)
def payment_voided_event(
*, order: Order, user: UserType, payment: Payment
) -> OrderEvent:
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(
order=order,
type=OrderEvents.PAYMENT_VOIDED,
user=user,
**_get_payment_data(None, payment),
)
def payment_failed_event(
*, order: Order, user: UserType, message: str, payment: Payment
) -> OrderEvent:
if not _user_is_valid(user):
user = None
parameters = {"message": message}
if payment:
parameters.update({"gateway": payment.gateway, "payment_id": payment.token})
return OrderEvent.objects.create(
order=order, type=OrderEvents.PAYMENT_FAILED, user=user, parameters=parameters
)
def external_notification_event(
*, order: Order, user: UserType, message: Optional[str], parameters: Optional[dict]
) -> OrderEvent:
if not _user_is_valid(user):
user = None
parameters = parameters or {}
parameters["message"] = message
return OrderEvent.objects.create(
order=order,
type=OrderEvents.EXTERNAL_SERVICE_NOTIFICATION,
user=user,
parameters=parameters,
)
def fulfillment_canceled_event(
*, order: Order, user: UserType, fulfillment: Fulfillment
) -> OrderEvent:
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(
order=order,
type=OrderEvents.FULFILLMENT_CANCELED,
user=user,
parameters={"composed_id": fulfillment.composed_id},
)
def fulfillment_restocked_items_event(
*,
order: Order,
user: UserType,
fulfillment: Union[Order, Fulfillment],
warehouse_pk: Optional[int] = None,
) -> OrderEvent:
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(
order=order,
type=OrderEvents.FULFILLMENT_RESTOCKED_ITEMS,
user=user,
parameters={
"quantity": fulfillment.get_total_quantity(),
"warehouse": warehouse_pk,
},
)
def fulfillment_fulfilled_items_event(
*, order: Order, user: UserType, fulfillment_lines: List[FulfillmentLine]
) -> OrderEvent:
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(
order=order,
type=OrderEvents.FULFILLMENT_FULFILLED_ITEMS,
user=user,
parameters={"fulfilled_items": [line.pk for line in fulfillment_lines]},
)
def fulfillment_refunded_event(
*,
order: Order,
user: UserType,
refunded_lines: List[Tuple[int, OrderLine]],
amount: Decimal,
shipping_costs_included: bool
):
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(
order=order,
type=OrderEvents.FULFILLMENT_REFUNDED,
user=user,
parameters={
"lines": _lines_per_quantity_to_line_object_list(refunded_lines),
"amount": amount,
"shipping_costs_included": shipping_costs_included,
},
)
def fulfillment_tracking_updated_event(
*, order: Order, user: UserType, tracking_number: str, fulfillment: Fulfillment
) -> OrderEvent:
if not _user_is_valid(user):
user = None
return OrderEvent.objects.create(
order=order,
type=OrderEvents.TRACKING_UPDATED,
user=user,
parameters={
"tracking_number": tracking_number,
"fulfillment": fulfillment.composed_id,
},
)
def order_note_added_event(*, order: Order, user: UserType, message: str) -> OrderEvent:
kwargs = {}
if user is not None and not user.is_anonymous:
if order.user is not None and order.user.pk == user.pk:
account_events.customer_added_to_note_order_event(
user=user, order=order, message=message
)
kwargs["user"] = user
return OrderEvent.objects.create(
order=order,
type=OrderEvents.NOTE_ADDED,
parameters={"message": message},
**kwargs,
)
|